summaryrefslogtreecommitdiff
path: root/numpy
diff options
context:
space:
mode:
Diffstat (limited to 'numpy')
-rw-r--r--numpy/__init__.py55
-rw-r--r--numpy/_build_utils/__init__.py1
-rw-r--r--numpy/_build_utils/apple_accelerate.py2
-rw-r--r--numpy/_globals.py4
-rw-r--r--numpy/_pytesttester.py27
-rw-r--r--numpy/compat/__init__.py2
-rw-r--r--numpy/compat/_inspect.py2
-rw-r--r--numpy/compat/py3k.py183
-rw-r--r--numpy/compat/setup.py2
-rw-r--r--numpy/compat/tests/test_compat.py2
-rw-r--r--numpy/conftest.py25
-rw-r--r--numpy/core/__init__.py9
-rw-r--r--numpy/core/_add_newdocs.py208
-rw-r--r--numpy/core/_asarray.py2
-rw-r--r--numpy/core/_dtype.py18
-rw-r--r--numpy/core/_exceptions.py1
-rw-r--r--numpy/core/_internal.py108
-rw-r--r--numpy/core/_methods.py23
-rw-r--r--numpy/core/_type_aliases.py20
-rw-r--r--numpy/core/_ufunc_config.py24
-rw-r--r--numpy/core/arrayprint.py96
-rw-r--r--numpy/core/code_generators/__init__.py1
-rw-r--r--numpy/core/code_generators/cversions.txt3
-rw-r--r--numpy/core/code_generators/genapi.py27
-rw-r--r--numpy/core/code_generators/generate_numpy_api.py19
-rw-r--r--numpy/core/code_generators/generate_ufunc_api.py22
-rw-r--r--numpy/core/code_generators/generate_umath.py101
-rw-r--r--numpy/core/code_generators/numpy_api.py2
-rw-r--r--numpy/core/code_generators/ufunc_docstrings.py132
-rw-r--r--numpy/core/cversions.py2
-rw-r--r--numpy/core/defchararray.py98
-rw-r--r--numpy/core/einsumfunc.py74
-rw-r--r--numpy/core/fromnumeric.py129
-rw-r--r--numpy/core/function_base.py10
-rw-r--r--numpy/core/getlimits.py35
-rw-r--r--numpy/core/include/numpy/arrayscalars.h8
-rw-r--r--numpy/core/include/numpy/ndarrayobject.h17
-rw-r--r--numpy/core/include/numpy/ndarraytypes.h19
-rw-r--r--numpy/core/include/numpy/npy_1_7_deprecated_api.h7
-rw-r--r--numpy/core/include/numpy/npy_3kcompat.h64
-rw-r--r--numpy/core/include/numpy/npy_common.h10
-rw-r--r--numpy/core/include/numpy/numpyconfig.h4
-rw-r--r--numpy/core/include/numpy/random/bitgen.h (renamed from numpy/random/src/bitgen.h)2
-rw-r--r--numpy/core/include/numpy/random/distributions.h (renamed from numpy/random/src/distributions/distributions.h)108
-rw-r--r--numpy/core/include/numpy/ufuncobject.h4
-rw-r--r--numpy/core/machar.py4
-rw-r--r--numpy/core/memmap.py8
-rw-r--r--numpy/core/multiarray.py19
-rw-r--r--numpy/core/numeric.py98
-rw-r--r--numpy/core/numerictypes.py45
-rw-r--r--numpy/core/records.py112
-rw-r--r--numpy/core/setup.py27
-rw-r--r--numpy/core/setup_common.py52
-rw-r--r--numpy/core/shape_base.py19
-rw-r--r--numpy/core/src/common/array_assign.c10
-rw-r--r--numpy/core/src/common/array_assign.h20
-rw-r--r--numpy/core/src/common/binop_override.h5
-rw-r--r--numpy/core/src/common/cblasfuncs.c76
-rw-r--r--numpy/core/src/common/get_attr_string.h38
-rw-r--r--numpy/core/src/common/lowlevel_strided_loops.h36
-rw-r--r--numpy/core/src/common/npy_cblas.h570
-rw-r--r--numpy/core/src/common/npy_cblas_base.h557
-rw-r--r--numpy/core/src/common/npy_config.h1
-rw-r--r--numpy/core/src/common/npy_cpu_features.c.src410
-rw-r--r--numpy/core/src/common/npy_cpu_features.h117
-rw-r--r--numpy/core/src/common/npy_longdouble.c4
-rw-r--r--numpy/core/src/common/numpyos.c2
-rw-r--r--numpy/core/src/common/python_xerbla.c12
-rw-r--r--numpy/core/src/common/ucsnarrow.c126
-rw-r--r--numpy/core/src/common/ucsnarrow.h6
-rw-r--r--numpy/core/src/common/ufunc_override.c8
-rw-r--r--numpy/core/src/dummymodule.c9
-rw-r--r--numpy/core/src/multiarray/_datetime.h10
-rw-r--r--numpy/core/src/multiarray/_multiarray_tests.c.src64
-rw-r--r--numpy/core/src/multiarray/alloc.c9
-rw-r--r--numpy/core/src/multiarray/array_assign_array.c18
-rw-r--r--numpy/core/src/multiarray/array_assign_scalar.c10
-rw-r--r--numpy/core/src/multiarray/arrayfunction_override.c8
-rw-r--r--numpy/core/src/multiarray/arrayobject.c163
-rw-r--r--numpy/core/src/multiarray/arraytypes.c.src653
-rw-r--r--numpy/core/src/multiarray/buffer.c79
-rw-r--r--numpy/core/src/multiarray/calculation.c21
-rw-r--r--numpy/core/src/multiarray/common.c211
-rw-r--r--numpy/core/src/multiarray/common.h17
-rw-r--r--numpy/core/src/multiarray/compiled_base.c47
-rw-r--r--numpy/core/src/multiarray/conversion_utils.c55
-rw-r--r--numpy/core/src/multiarray/conversion_utils.h5
-rw-r--r--numpy/core/src/multiarray/convert.c6
-rw-r--r--numpy/core/src/multiarray/convert_datatype.c126
-rw-r--r--numpy/core/src/multiarray/ctors.c508
-rw-r--r--numpy/core/src/multiarray/ctors.h8
-rw-r--r--numpy/core/src/multiarray/datetime.c90
-rw-r--r--numpy/core/src/multiarray/datetime_busday.c10
-rw-r--r--numpy/core/src/multiarray/datetime_busdaycal.c65
-rw-r--r--numpy/core/src/multiarray/datetime_strings.c10
-rw-r--r--numpy/core/src/multiarray/datetime_strings.h2
-rw-r--r--numpy/core/src/multiarray/descriptor.c1490
-rw-r--r--numpy/core/src/multiarray/descriptor.h6
-rw-r--r--numpy/core/src/multiarray/dragon4.c20
-rw-r--r--numpy/core/src/multiarray/dtype_transfer.c95
-rw-r--r--numpy/core/src/multiarray/einsum.c.src44
-rw-r--r--numpy/core/src/multiarray/flagsobject.c112
-rw-r--r--numpy/core/src/multiarray/getset.c116
-rw-r--r--numpy/core/src/multiarray/item_selection.c380
-rw-r--r--numpy/core/src/multiarray/item_selection.h2
-rw-r--r--numpy/core/src/multiarray/iterators.c192
-rw-r--r--numpy/core/src/multiarray/lowlevel_strided_loops.c.src18
-rw-r--r--numpy/core/src/multiarray/mapping.c88
-rw-r--r--numpy/core/src/multiarray/methods.c104
-rw-r--r--numpy/core/src/multiarray/multiarraymodule.c303
-rw-r--r--numpy/core/src/multiarray/nditer_api.c12
-rw-r--r--numpy/core/src/multiarray/nditer_constr.c26
-rw-r--r--numpy/core/src/multiarray/nditer_pywrap.c87
-rw-r--r--numpy/core/src/multiarray/npy_buffer.h (renamed from numpy/core/src/multiarray/buffer.h)2
-rw-r--r--numpy/core/src/multiarray/number.c192
-rw-r--r--numpy/core/src/multiarray/refcount.c6
-rw-r--r--numpy/core/src/multiarray/scalarapi.c230
-rw-r--r--numpy/core/src/multiarray/scalartypes.c.src1283
-rw-r--r--numpy/core/src/multiarray/scalartypes.h5
-rw-r--r--numpy/core/src/multiarray/sequence.c5
-rw-r--r--numpy/core/src/multiarray/shape.c4
-rw-r--r--numpy/core/src/multiarray/shape.h2
-rw-r--r--numpy/core/src/multiarray/strfuncs.c33
-rw-r--r--numpy/core/src/multiarray/strfuncs.h5
-rw-r--r--numpy/core/src/multiarray/typeinfo.c18
-rw-r--r--numpy/core/src/multiarray/usertypes.c43
-rw-r--r--numpy/core/src/multiarray/vdot.c16
-rw-r--r--numpy/core/src/npysort/npysort_common.h16
-rw-r--r--numpy/core/src/umath/_operand_flag_tests.c.src20
-rw-r--r--numpy/core/src/umath/_rational_tests.c.src67
-rw-r--r--numpy/core/src/umath/_struct_ufunc_tests.c.src22
-rw-r--r--numpy/core/src/umath/_umath_tests.c.src36
-rw-r--r--numpy/core/src/umath/clip.c.src2
-rw-r--r--numpy/core/src/umath/clip.h.src2
-rw-r--r--numpy/core/src/umath/cpuid.c97
-rw-r--r--numpy/core/src/umath/cpuid.h9
-rw-r--r--numpy/core/src/umath/extobj.c8
-rw-r--r--numpy/core/src/umath/fast_loop_macros.h4
-rw-r--r--numpy/core/src/umath/funcs.inc.src14
-rw-r--r--numpy/core/src/umath/loops.c.src689
-rw-r--r--numpy/core/src/umath/loops.h.src238
-rw-r--r--numpy/core/src/umath/matmul.c.src51
-rw-r--r--numpy/core/src/umath/matmul.h.src2
-rw-r--r--numpy/core/src/umath/override.c89
-rw-r--r--numpy/core/src/umath/reduction.c23
-rw-r--r--numpy/core/src/umath/reduction.h4
-rw-r--r--numpy/core/src/umath/scalarmath.c.src198
-rw-r--r--numpy/core/src/umath/simd.inc.src654
-rw-r--r--numpy/core/src/umath/ufunc_object.c234
-rw-r--r--numpy/core/src/umath/ufunc_type_resolution.c54
-rw-r--r--numpy/core/src/umath/ufunc_type_resolution.h7
-rw-r--r--numpy/core/src/umath/umathmodule.c30
-rw-r--r--numpy/core/tests/_locales.py4
-rw-r--r--numpy/core/tests/data/umath-validation-set-cos42
-rw-r--r--numpy/core/tests/data/umath-validation-set-sin47
-rw-r--r--numpy/core/tests/test_abc.py4
-rw-r--r--numpy/core/tests/test_api.py75
-rw-r--r--numpy/core/tests/test_arrayprint.py56
-rw-r--r--numpy/core/tests/test_cpu_features.py104
-rw-r--r--numpy/core/tests/test_datetime.py130
-rw-r--r--numpy/core/tests/test_defchararray.py59
-rw-r--r--numpy/core/tests/test_deprecations.py136
-rw-r--r--numpy/core/tests/test_dtype.py49
-rw-r--r--numpy/core/tests/test_einsum.py20
-rw-r--r--numpy/core/tests/test_errstate.py20
-rw-r--r--numpy/core/tests/test_extint128.py2
-rw-r--r--numpy/core/tests/test_function_base.py31
-rw-r--r--numpy/core/tests/test_getlimits.py18
-rw-r--r--numpy/core/tests/test_half.py4
-rw-r--r--numpy/core/tests/test_indexerrors.py18
-rw-r--r--numpy/core/tests/test_indexing.py85
-rw-r--r--numpy/core/tests/test_item_selection.py9
-rw-r--r--numpy/core/tests/test_longdouble.py152
-rw-r--r--numpy/core/tests/test_machar.py6
-rw-r--r--numpy/core/tests/test_mem_overlap.py40
-rw-r--r--numpy/core/tests/test_memmap.py7
-rw-r--r--numpy/core/tests/test_multiarray.py1007
-rw-r--r--numpy/core/tests/test_nditer.py16
-rw-r--r--numpy/core/tests/test_numeric.py229
-rw-r--r--numpy/core/tests/test_numerictypes.py67
-rw-r--r--numpy/core/tests/test_overrides.py32
-rw-r--r--numpy/core/tests/test_print.py7
-rw-r--r--numpy/core/tests/test_protocols.py44
-rw-r--r--numpy/core/tests/test_records.py47
-rw-r--r--numpy/core/tests/test_regression.py196
-rw-r--r--numpy/core/tests/test_scalar_ctors.py54
-rw-r--r--numpy/core/tests/test_scalar_methods.py12
-rw-r--r--numpy/core/tests/test_scalarbuffer.py40
-rw-r--r--numpy/core/tests/test_scalarinherit.py30
-rw-r--r--numpy/core/tests/test_scalarmath.py46
-rw-r--r--numpy/core/tests/test_scalarprint.py20
-rw-r--r--numpy/core/tests/test_shape_base.py27
-rw-r--r--numpy/core/tests/test_ufunc.py102
-rw-r--r--numpy/core/tests/test_umath.py229
-rw-r--r--numpy/core/tests/test_umath_accuracy.py54
-rw-r--r--numpy/core/tests/test_umath_complex.py53
-rw-r--r--numpy/core/tests/test_unicode.py108
-rw-r--r--numpy/core/umath.py9
-rw-r--r--numpy/core/umath_tests.py2
-rw-r--r--numpy/ctypeslib.py2
-rw-r--r--numpy/distutils/__init__.py2
-rw-r--r--numpy/distutils/ccompiler.py38
-rw-r--r--numpy/distutils/command/__init__.py2
-rw-r--r--numpy/distutils/command/autodist.py2
-rw-r--r--numpy/distutils/command/bdist_rpm.py2
-rw-r--r--numpy/distutils/command/build.py2
-rw-r--r--numpy/distutils/command/build_clib.py9
-rw-r--r--numpy/distutils/command/build_ext.py12
-rw-r--r--numpy/distutils/command/build_py.py2
-rw-r--r--numpy/distutils/command/build_scripts.py2
-rw-r--r--numpy/distutils/command/build_src.py2
-rw-r--r--numpy/distutils/command/config.py19
-rw-r--r--numpy/distutils/command/config_compiler.py2
-rw-r--r--numpy/distutils/command/develop.py2
-rw-r--r--numpy/distutils/command/egg_info.py2
-rw-r--r--numpy/distutils/command/install.py2
-rw-r--r--numpy/distutils/command/install_clib.py2
-rw-r--r--numpy/distutils/command/install_data.py2
-rw-r--r--numpy/distutils/command/install_headers.py2
-rw-r--r--numpy/distutils/command/sdist.py2
-rw-r--r--numpy/distutils/compat.py10
-rw-r--r--numpy/distutils/conv_template.py15
-rw-r--r--numpy/distutils/core.py6
-rw-r--r--numpy/distutils/cpuinfo.py36
-rw-r--r--numpy/distutils/exec_command.py16
-rw-r--r--numpy/distutils/extension.py8
-rw-r--r--numpy/distutils/fcompiler/__init__.py15
-rw-r--r--numpy/distutils/fcompiler/absoft.py2
-rw-r--r--numpy/distutils/fcompiler/compaq.py12
-rw-r--r--numpy/distutils/fcompiler/environment.py5
-rw-r--r--numpy/distutils/fcompiler/g95.py2
-rw-r--r--numpy/distutils/fcompiler/gnu.py36
-rw-r--r--numpy/distutils/fcompiler/hpux.py2
-rw-r--r--numpy/distutils/fcompiler/ibm.py2
-rw-r--r--numpy/distutils/fcompiler/intel.py2
-rw-r--r--numpy/distutils/fcompiler/lahey.py2
-rw-r--r--numpy/distutils/fcompiler/mips.py2
-rw-r--r--numpy/distutils/fcompiler/nag.py2
-rw-r--r--numpy/distutils/fcompiler/none.py2
-rw-r--r--numpy/distutils/fcompiler/pathf95.py2
-rw-r--r--numpy/distutils/fcompiler/pg.py98
-rw-r--r--numpy/distutils/fcompiler/sun.py2
-rw-r--r--numpy/distutils/fcompiler/vast.py2
-rw-r--r--numpy/distutils/from_template.py4
-rw-r--r--numpy/distutils/intelccompiler.py2
-rw-r--r--numpy/distutils/lib2def.py21
-rw-r--r--numpy/distutils/line_endings.py5
-rw-r--r--numpy/distutils/log.py14
-rw-r--r--numpy/distutils/mingw32ccompiler.py63
-rw-r--r--numpy/distutils/misc_util.py84
-rw-r--r--numpy/distutils/msvc9compiler.py2
-rw-r--r--numpy/distutils/msvccompiler.py2
-rw-r--r--numpy/distutils/npy_pkg_config.py12
-rw-r--r--numpy/distutils/numpy_distribution.py2
-rw-r--r--numpy/distutils/pathccompiler.py2
-rw-r--r--numpy/distutils/setup.py4
-rw-r--r--numpy/distutils/system_info.py457
-rw-r--r--numpy/distutils/tests/test_exec_command.py20
-rw-r--r--numpy/distutils/tests/test_fcompiler.py6
-rw-r--r--numpy/distutils/tests/test_fcompiler_gnu.py6
-rw-r--r--numpy/distutils/tests/test_fcompiler_intel.py6
-rw-r--r--numpy/distutils/tests/test_fcompiler_nagfor.py4
-rw-r--r--numpy/distutils/tests/test_mingw32ccompiler.py42
-rw-r--r--numpy/distutils/tests/test_misc_util.py10
-rw-r--r--numpy/distutils/tests/test_npy_pkg_config.py6
-rw-r--r--numpy/distutils/tests/test_shell_utils.py3
-rw-r--r--numpy/distutils/tests/test_system_info.py38
-rw-r--r--numpy/distutils/unixccompiler.py21
-rw-r--r--numpy/doc/__init__.py2
-rw-r--r--numpy/doc/basics.py5
-rw-r--r--numpy/doc/broadcasting.py1
-rw-r--r--numpy/doc/byteswapping.py1
-rw-r--r--numpy/doc/constants.py5
-rw-r--r--numpy/doc/creation.py1
-rw-r--r--numpy/doc/dispatch.py2
-rw-r--r--numpy/doc/glossary.py5
-rw-r--r--numpy/doc/indexing.py4
-rw-r--r--numpy/doc/internals.py1
-rw-r--r--numpy/doc/misc.py1
-rw-r--r--numpy/doc/structured_arrays.py1
-rw-r--r--numpy/doc/subclassing.py7
-rw-r--r--numpy/doc/ufuncs.py1
-rw-r--r--numpy/dual.py2
-rw-r--r--numpy/f2py/__init__.py8
-rw-r--r--numpy/f2py/__main__.py2
-rw-r--r--numpy/f2py/__version__.py2
-rw-r--r--numpy/f2py/auxfuncs.py6
-rw-r--r--numpy/f2py/capi_maps.py45
-rw-r--r--numpy/f2py/cb_rules.py6
-rw-r--r--numpy/f2py/cfuncs.py32
-rw-r--r--numpy/f2py/common_rules.py4
-rwxr-xr-xnumpy/f2py/crackfortran.py40
-rw-r--r--numpy/f2py/diagnose.py4
-rwxr-xr-xnumpy/f2py/f2py2e.py21
-rw-r--r--numpy/f2py/f2py_testing.py2
-rw-r--r--numpy/f2py/f90mod_rules.py9
-rw-r--r--numpy/f2py/func2subr.py4
-rwxr-xr-xnumpy/f2py/rules.py57
-rw-r--r--numpy/f2py/setup.py4
-rw-r--r--numpy/f2py/src/fortranobject.c111
-rw-r--r--numpy/f2py/src/fortranobject.h35
-rw-r--r--numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c14
-rw-r--r--numpy/f2py/tests/test_array_from_pyobj.py12
-rw-r--r--numpy/f2py/tests/test_assumed_shape.py24
-rw-r--r--numpy/f2py/tests/test_block_docstring.py5
-rw-r--r--numpy/f2py/tests/test_callback.py10
-rw-r--r--numpy/f2py/tests/test_common.py2
-rw-r--r--numpy/f2py/tests/test_compile_function.py4
-rw-r--r--numpy/f2py/tests/test_crackfortran.py88
-rw-r--r--numpy/f2py/tests/test_kind.py2
-rw-r--r--numpy/f2py/tests/test_mixed.py8
-rw-r--r--numpy/f2py/tests/test_parameter.py2
-rw-r--r--numpy/f2py/tests/test_quoted_character.py3
-rw-r--r--numpy/f2py/tests/test_regression.py2
-rw-r--r--numpy/f2py/tests/test_return_character.py15
-rw-r--r--numpy/f2py/tests/test_return_complex.py14
-rw-r--r--numpy/f2py/tests/test_return_integer.py14
-rw-r--r--numpy/f2py/tests/test_return_logical.py4
-rw-r--r--numpy/f2py/tests/test_return_real.py19
-rw-r--r--numpy/f2py/tests/test_semicolon_split.py2
-rw-r--r--numpy/f2py/tests/test_size.py2
-rw-r--r--numpy/f2py/tests/test_string.py2
-rw-r--r--numpy/f2py/tests/util.py37
-rw-r--r--numpy/f2py/use_rules.py4
-rw-r--r--numpy/fft/__init__.py10
-rw-r--r--numpy/fft/_pocketfft.c31
-rw-r--r--numpy/fft/_pocketfft.py5
-rw-r--r--numpy/fft/helper.py2
-rw-r--r--numpy/fft/setup.py2
-rw-r--r--numpy/fft/tests/test_helper.py11
-rw-r--r--numpy/fft/tests/test_pocketfft.py14
-rw-r--r--numpy/lib/__init__.py2
-rw-r--r--numpy/lib/_datasource.py112
-rw-r--r--numpy/lib/_iotools.py149
-rw-r--r--numpy/lib/_version.py8
-rw-r--r--numpy/lib/arraypad.py6
-rw-r--r--numpy/lib/arraysetops.py52
-rw-r--r--numpy/lib/arrayterator.py11
-rw-r--r--numpy/lib/financial.py148
-rw-r--r--numpy/lib/format.py72
-rw-r--r--numpy/lib/function_base.py226
-rw-r--r--numpy/lib/histograms.py41
-rw-r--r--numpy/lib/index_tricks.py16
-rw-r--r--numpy/lib/mixins.py10
-rw-r--r--numpy/lib/nanfunctions.py37
-rw-r--r--numpy/lib/npyio.py164
-rw-r--r--numpy/lib/polynomial.py12
-rw-r--r--numpy/lib/recfunctions.py38
-rw-r--r--numpy/lib/scimath.py2
-rw-r--r--numpy/lib/setup.py2
-rw-r--r--numpy/lib/shape_base.py73
-rw-r--r--numpy/lib/stride_tricks.py12
-rw-r--r--numpy/lib/tests/test__datasource.py48
-rw-r--r--numpy/lib/tests/test__iotools.py17
-rw-r--r--numpy/lib/tests/test__version.py2
-rw-r--r--numpy/lib/tests/test_arraypad.py59
-rw-r--r--numpy/lib/tests/test_arraysetops.py133
-rw-r--r--numpy/lib/tests/test_arrayterator.py2
-rw-r--r--numpy/lib/tests/test_financial.py40
-rw-r--r--numpy/lib/tests/test_format.py95
-rw-r--r--numpy/lib/tests/test_function_base.py231
-rw-r--r--numpy/lib/tests/test_histograms.py21
-rw-r--r--numpy/lib/tests/test_index_tricks.py18
-rw-r--r--numpy/lib/tests/test_io.py107
-rw-r--r--numpy/lib/tests/test_mixins.py20
-rw-r--r--numpy/lib/tests/test_nanfunctions.py45
-rw-r--r--numpy/lib/tests/test_packbits.py2
-rw-r--r--numpy/lib/tests/test_polynomial.py4
-rw-r--r--numpy/lib/tests/test_recfunctions.py19
-rw-r--r--numpy/lib/tests/test_regression.py13
-rw-r--r--numpy/lib/tests/test_shape_base.py59
-rw-r--r--numpy/lib/tests/test_stride_tricks.py11
-rw-r--r--numpy/lib/tests/test_twodim_base.py22
-rw-r--r--numpy/lib/tests/test_type_check.py40
-rw-r--r--numpy/lib/tests/test_ufunclike.py8
-rw-r--r--numpy/lib/tests/test_utils.py9
-rw-r--r--numpy/lib/twodim_base.py6
-rw-r--r--numpy/lib/type_check.py22
-rw-r--r--numpy/lib/ufunclike.py26
-rw-r--r--numpy/lib/user_array.py12
-rw-r--r--numpy/lib/utils.py46
-rw-r--r--numpy/linalg/__init__.py2
-rw-r--r--numpy/linalg/lapack_lite/README.rst4
-rw-r--r--numpy/linalg/lapack_lite/clapack_scrub.py29
-rw-r--r--numpy/linalg/lapack_lite/f2c.c4
-rw-r--r--numpy/linalg/lapack_lite/f2c.h19
-rw-r--r--numpy/linalg/lapack_lite/f2c_blas.c6
-rw-r--r--numpy/linalg/lapack_lite/f2c_c_lapack.c6
-rw-r--r--numpy/linalg/lapack_lite/f2c_config.c6
-rw-r--r--numpy/linalg/lapack_lite/f2c_d_lapack.c6
-rw-r--r--numpy/linalg/lapack_lite/f2c_lapack.c6
-rw-r--r--numpy/linalg/lapack_lite/f2c_s_lapack.c6
-rw-r--r--numpy/linalg/lapack_lite/f2c_z_lapack.c6
-rw-r--r--numpy/linalg/lapack_lite/fortran.py12
-rw-r--r--numpy/linalg/lapack_lite/lapack_lite_names.h691
-rwxr-xr-xnumpy/linalg/lapack_lite/make_lite.py81
-rw-r--r--numpy/linalg/lapack_lite/python_xerbla.c9
-rw-r--r--numpy/linalg/lapack_litemodule.c232
-rw-r--r--numpy/linalg/linalg.py222
-rw-r--r--numpy/linalg/setup.py31
-rw-r--r--numpy/linalg/tests/test_build.py6
-rw-r--r--numpy/linalg/tests/test_deprecations.py2
-rw-r--r--numpy/linalg/tests/test_linalg.py84
-rw-r--r--numpy/linalg/tests/test_regression.py8
-rw-r--r--numpy/linalg/umath_linalg.c.src656
-rw-r--r--numpy/ma/README.rst (renamed from numpy/ma/README.txt)0
-rw-r--r--numpy/ma/__init__.py2
-rw-r--r--numpy/ma/bench.py4
-rw-r--r--numpy/ma/core.py260
-rw-r--r--numpy/ma/extras.py22
-rw-r--r--numpy/ma/mrecords.py13
-rw-r--r--numpy/ma/setup.py4
-rw-r--r--numpy/ma/tests/test_core.py84
-rw-r--r--numpy/ma/tests/test_deprecations.py6
-rw-r--r--numpy/ma/tests/test_extras.py43
-rw-r--r--numpy/ma/tests/test_mrecords.py8
-rw-r--r--numpy/ma/tests/test_old_ma.py8
-rw-r--r--numpy/ma/tests/test_regression.py8
-rw-r--r--numpy/ma/tests/test_subclassing.py8
-rw-r--r--numpy/ma/testutils.py2
-rw-r--r--numpy/ma/timer_comparison.py4
-rw-r--r--numpy/matlib.py21
-rw-r--r--numpy/matrixlib/__init__.py2
-rw-r--r--numpy/matrixlib/defmatrix.py4
-rw-r--r--numpy/matrixlib/setup.py4
-rw-r--r--numpy/matrixlib/tests/test_defmatrix.py29
-rw-r--r--numpy/matrixlib/tests/test_interaction.py31
-rw-r--r--numpy/matrixlib/tests/test_masked_matrix.py8
-rw-r--r--numpy/matrixlib/tests/test_matrix_linalg.py2
-rw-r--r--numpy/matrixlib/tests/test_multiarray.py4
-rw-r--r--numpy/matrixlib/tests/test_numeric.py4
-rw-r--r--numpy/matrixlib/tests/test_regression.py4
-rw-r--r--numpy/polynomial/__init__.py2
-rw-r--r--numpy/polynomial/_polybase.py48
-rw-r--r--numpy/polynomial/chebyshev.py108
-rw-r--r--numpy/polynomial/hermite.py94
-rw-r--r--numpy/polynomial/hermite_e.py94
-rw-r--r--numpy/polynomial/laguerre.py94
-rw-r--r--numpy/polynomial/legendre.py91
-rw-r--r--numpy/polynomial/polynomial.py92
-rw-r--r--numpy/polynomial/polyutils.py126
-rw-r--r--numpy/polynomial/setup.py2
-rw-r--r--numpy/polynomial/tests/test_chebyshev.py38
-rw-r--r--numpy/polynomial/tests/test_classes.py11
-rw-r--r--numpy/polynomial/tests/test_hermite.py34
-rw-r--r--numpy/polynomial/tests/test_hermite_e.py34
-rw-r--r--numpy/polynomial/tests/test_laguerre.py34
-rw-r--r--numpy/polynomial/tests/test_legendre.py34
-rw-r--r--numpy/polynomial/tests/test_polynomial.py36
-rw-r--r--numpy/polynomial/tests/test_polyutils.py6
-rw-r--r--numpy/polynomial/tests/test_printing.py6
-rw-r--r--numpy/random/.gitignore3
-rw-r--r--numpy/random/__init__.pxd14
-rw-r--r--numpy/random/__init__.py23
-rw-r--r--numpy/random/_bounded_integers.pxd.in (renamed from numpy/random/bounded_integers.pxd.in)2
-rw-r--r--numpy/random/_bounded_integers.pyx.in (renamed from numpy/random/bounded_integers.pyx.in)73
-rw-r--r--numpy/random/_common.pxd (renamed from numpy/random/common.pxd)20
-rw-r--r--numpy/random/_common.pyx (renamed from numpy/random/common.pyx)11
-rw-r--r--numpy/random/_examples/cffi/extending.py40
-rw-r--r--numpy/random/_examples/cffi/parse.py46
-rw-r--r--numpy/random/_examples/cython/extending.pyx (renamed from numpy/random/examples/cython/extending.pyx)6
-rw-r--r--numpy/random/_examples/cython/extending_distributions.pyx117
-rw-r--r--numpy/random/_examples/cython/setup.py41
-rw-r--r--numpy/random/_examples/numba/extending.py84
-rw-r--r--numpy/random/_examples/numba/extending_distributions.py (renamed from numpy/random/examples/numba/extending_distributions.py)22
-rw-r--r--numpy/random/_generator.pyx (renamed from numpy/random/generator.pyx)547
-rw-r--r--numpy/random/_mt19937.pyx (renamed from numpy/random/mt19937.pyx)9
-rw-r--r--numpy/random/_pcg64.pyx (renamed from numpy/random/pcg64.pyx)10
-rw-r--r--numpy/random/_philox.pyx (renamed from numpy/random/philox.pyx)21
-rw-r--r--numpy/random/_pickle.py10
-rw-r--r--numpy/random/_sfc64.pyx (renamed from numpy/random/sfc64.pyx)10
-rw-r--r--numpy/random/bit_generator.pxd13
-rw-r--r--numpy/random/bit_generator.pyx11
-rw-r--r--numpy/random/c_distributions.pxd114
-rw-r--r--numpy/random/distributions.pxd140
-rw-r--r--numpy/random/examples/cython/extending_distributions.pyx59
-rw-r--r--numpy/random/examples/cython/setup.py27
-rw-r--r--numpy/random/examples/numba/extending.py77
-rw-r--r--numpy/random/include/aligned_malloc.h (renamed from numpy/random/src/aligned_malloc/aligned_malloc.h)0
-rw-r--r--numpy/random/include/legacy-distributions.h (renamed from numpy/random/src/legacy/legacy-distributions.h)2
-rw-r--r--numpy/random/legacy_distributions.pxd50
-rw-r--r--numpy/random/mtrand.pyx480
-rw-r--r--numpy/random/setup.py80
-rw-r--r--numpy/random/src/aligned_malloc/aligned_malloc.c9
-rw-r--r--numpy/random/src/distributions/distributions.c335
-rw-r--r--numpy/random/src/distributions/random_hypergeometric.c6
-rw-r--r--numpy/random/src/distributions/random_mvhg_count.c131
-rw-r--r--numpy/random/src/distributions/random_mvhg_marginals.c138
-rw-r--r--numpy/random/src/legacy/legacy-distributions.c10
-rw-r--r--numpy/random/src/philox/philox-benchmark.c2
-rw-r--r--numpy/random/tests/test_direct.py13
-rw-r--r--numpy/random/tests/test_extending.py85
-rw-r--r--numpy/random/tests/test_generator_mt19937.py249
-rw-r--r--numpy/random/tests/test_generator_mt19937_regressions.py20
-rw-r--r--numpy/random/tests/test_random.py27
-rw-r--r--numpy/random/tests/test_randomstate.py27
-rw-r--r--numpy/random/tests/test_randomstate_regression.py19
-rw-r--r--numpy/random/tests/test_regression.py19
-rw-r--r--numpy/random/tests/test_seed_sequence.py2
-rw-r--r--numpy/random/tests/test_smoke.py11
-rw-r--r--numpy/setup.py4
-rw-r--r--numpy/testing/__init__.py2
-rw-r--r--numpy/testing/_private/decorators.py18
-rw-r--r--numpy/testing/_private/noseclasses.py4
-rw-r--r--numpy/testing/_private/nosetester.py23
-rw-r--r--numpy/testing/_private/parameterized.py67
-rw-r--r--numpy/testing/_private/utils.py266
-rwxr-xr-xnumpy/testing/print_coercion_tables.py6
-rwxr-xr-xnumpy/testing/setup.py4
-rw-r--r--numpy/testing/tests/test_decorators.py16
-rw-r--r--numpy/testing/tests/test_doctesting.py2
-rw-r--r--numpy/testing/tests/test_utils.py70
-rw-r--r--numpy/testing/utils.py2
-rw-r--r--numpy/tests/test_ctypeslib.py12
-rw-r--r--numpy/tests/test_matlib.py10
-rw-r--r--numpy/tests/test_numpy_version.py2
-rw-r--r--numpy/tests/test_public_api.py24
-rw-r--r--numpy/tests/test_reloading.py9
-rw-r--r--numpy/tests/test_scripts.py5
-rw-r--r--numpy/tests/test_warnings.py114
520 files changed, 15227 insertions, 14115 deletions
diff --git a/numpy/__init__.py b/numpy/__init__.py
index fef8245de..2d3423c56 100644
--- a/numpy/__init__.py
+++ b/numpy/__init__.py
@@ -104,8 +104,6 @@ available as array methods, i.e. ``x = np.array([1,2,3]); x.sort()``.
Exceptions to this rule are documented.
"""
-from __future__ import division, absolute_import, print_function
-
import sys
import warnings
@@ -143,7 +141,8 @@ else:
from .core import *
from . import compat
from . import lib
- # FIXME: why have numpy.lib if everything is imported here??
+ # NOTE: to be revisited following future namespace cleanup.
+ # See gh-14454 and gh-15672 for discussion.
from .lib import *
from . import linalg
@@ -154,28 +153,28 @@ else:
from . import ma
from . import matrixlib as _mat
from .matrixlib import *
- from .compat import long
# Make these accessible from numpy name-space
# but not imported in from numpy import *
- if sys.version_info[0] >= 3:
- from builtins import bool, int, float, complex, object, str
- unicode = str
- else:
- from __builtin__ import bool, int, float, complex, object, unicode, str
+ # TODO[gh-6103]: Deprecate these
+ from builtins import bool, int, float, complex, object, str
+ from .compat import long, unicode
from .core import round, abs, max, min
# now that numpy modules are imported, can initialize limits
core.getlimits._register_known_types()
- __all__.extend(['bool', 'int', 'float', 'complex', 'object', 'unicode',
- 'str'])
__all__.extend(['__version__', 'show_config'])
__all__.extend(core.__all__)
__all__.extend(_mat.__all__)
__all__.extend(lib.__all__)
__all__.extend(['linalg', 'fft', 'random', 'ctypeslib', 'ma'])
+ # These are added by `from .core import *` and `core.__all__`, but we
+ # overwrite them above with builtins we do _not_ want to export.
+ __all__.remove('long')
+ __all__.remove('unicode')
+
# Remove things that are in the numpy.lib but not in the numpy namespace
# Note that there is a test (numpy/tests/test_public_api.py:test_numpy_namespace)
# that prevents adding more things to the main namespace by accident.
@@ -216,7 +215,7 @@ else:
"{!r}".format(__name__, attr))
def __dir__():
- return __all__ + ['Tester', 'testing']
+ return list(globals().keys() | {'Tester', 'testing'})
else:
# We don't actually use this ourselves anymore, but I'm not 100% sure that
@@ -254,3 +253,35 @@ else:
_sanity_check()
del _sanity_check
+
+ def _mac_os_check():
+ """
+ Quick sanity check for Mac OS: look for Accelerate build bugs.
+ Testing numpy polyfit calls init_dgelsd(LAPACK)
+ """
+ try:
+ c = array([3., 2., 1.])
+ x = linspace(0, 2, 5)
+ y = polyval(c, x)
+ _ = polyfit(x, y, 2, cov=True)
+ except ValueError:
+ pass
+
+ import sys
+ if sys.platform == "darwin":
+ with warnings.catch_warnings(record=True) as w:
+ _mac_os_check()
+ # Throw a runtime error if the test failed; check for a warning and build error_message
+ error_message = ""
+ if len(w) > 0:
+ error_message = "{}: {}".format(w[-1].category.__name__, str(w[-1].message))
+ msg = (
+ "Polyfit sanity test emitted a warning, most likely due "
+ "to using a buggy Accelerate backend. "
+ "If you compiled yourself, "
+ "see site.cfg.example for information. "
+ "Otherwise report this to the vendor "
+ "that provided NumPy.\n{}\n".format(
+ error_message))
+ raise RuntimeError(msg)
+ del _mac_os_check
diff --git a/numpy/_build_utils/__init__.py b/numpy/_build_utils/__init__.py
index 1d0f69b67..e69de29bb 100644
--- a/numpy/_build_utils/__init__.py
+++ b/numpy/_build_utils/__init__.py
@@ -1 +0,0 @@
-from __future__ import division, absolute_import, print_function
diff --git a/numpy/_build_utils/apple_accelerate.py b/numpy/_build_utils/apple_accelerate.py
index 36dd7584a..b26aa12ad 100644
--- a/numpy/_build_utils/apple_accelerate.py
+++ b/numpy/_build_utils/apple_accelerate.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import os
import sys
import re
diff --git a/numpy/_globals.py b/numpy/_globals.py
index f5c0761b5..9f44c7729 100644
--- a/numpy/_globals.py
+++ b/numpy/_globals.py
@@ -15,8 +15,6 @@ That was not the case when the singleton classes were defined in the numpy
motivated this module.
"""
-from __future__ import division, absolute_import, print_function
-
__ALL__ = [
'ModuleDeprecationWarning', 'VisibleDeprecationWarning', '_NoValue'
]
@@ -56,7 +54,7 @@ class VisibleDeprecationWarning(UserWarning):
VisibleDeprecationWarning.__module__ = 'numpy'
-class _NoValueType(object):
+class _NoValueType:
"""Special keyword value.
The instance of this class may be used as the default value assigned to a
diff --git a/numpy/_pytesttester.py b/numpy/_pytesttester.py
index b25224c20..0dc38fa59 100644
--- a/numpy/_pytesttester.py
+++ b/numpy/_pytesttester.py
@@ -15,7 +15,7 @@ Warnings filtering and other runtime settings should be dealt with in the
whether or not that file is found as follows:
* ``pytest.ini`` is present (develop mode)
- All warnings except those explicily filtered out are raised as error.
+ All warnings except those explicitly filtered out are raised as error.
* ``pytest.ini`` is absent (release mode)
DeprecationWarnings and PendingDeprecationWarnings are ignored, other
warnings are passed through.
@@ -27,8 +27,6 @@ This module is imported by every numpy subpackage, so lies at the top level to
simplify circular import issues. For the same reason, it contains no numpy
imports at module scope, instead importing numpy within function calls.
"""
-from __future__ import division, absolute_import, print_function
-
import sys
import os
@@ -44,7 +42,7 @@ def _show_numpy_info():
print("NumPy relaxed strides checking option:", relaxed_strides)
-class PytestTester(object):
+class PytestTester:
"""
Pytest test runner.
@@ -127,13 +125,6 @@ class PytestTester(object):
import pytest
import warnings
- #FIXME This is no longer needed? Assume it was for use in tests.
- # cap verbosity at 3, which is equivalent to the pytest '-vv' option
- #from . import utils
- #verbose = min(int(verbose), 3)
- #utils.verbose = verbose
- #
-
module = sys.modules[self.module_name]
module_path = os.path.abspath(module.__path__[0])
@@ -162,21 +153,9 @@ class PytestTester(object):
# When testing matrices, ignore their PendingDeprecationWarnings
pytest_args += [
"-W ignore:the matrix subclass is not",
+ "-W ignore:Importing from numpy.matlib is",
]
- # Ignore python2.7 -3 warnings
- pytest_args += [
- r"-W ignore:sys\.exc_clear\(\) not supported in 3\.x:DeprecationWarning",
- r"-W ignore:in 3\.x, __setslice__:DeprecationWarning",
- r"-W ignore:in 3\.x, __getslice__:DeprecationWarning",
- r"-W ignore:buffer\(\) not supported in 3\.x:DeprecationWarning",
- r"-W ignore:CObject type is not supported in 3\.x:DeprecationWarning",
- r"-W ignore:comparing unequal types not supported in 3\.x:DeprecationWarning",
- r"-W ignore:the commands module has been removed in Python 3\.0:DeprecationWarning",
- r"-W ignore:The 'new' module has been removed in Python 3\.0:DeprecationWarning",
- ]
-
-
if doctests:
raise ValueError("Doctests not supported")
diff --git a/numpy/compat/__init__.py b/numpy/compat/__init__.py
index 5b371f5c0..afee621b8 100644
--- a/numpy/compat/__init__.py
+++ b/numpy/compat/__init__.py
@@ -8,8 +8,6 @@ extensions, which may be included for the following reasons:
* we may only need a small subset of the copied library/module
"""
-from __future__ import division, absolute_import, print_function
-
from . import _inspect
from . import py3k
from ._inspect import getargspec, formatargspec
diff --git a/numpy/compat/_inspect.py b/numpy/compat/_inspect.py
index 439d0d2c2..9a874a71d 100644
--- a/numpy/compat/_inspect.py
+++ b/numpy/compat/_inspect.py
@@ -5,8 +5,6 @@ significantly contributes to numpy import times. Importing this copy has almost
no overhead.
"""
-from __future__ import division, absolute_import, print_function
-
import types
__all__ = ['getargspec', 'formatargspec']
diff --git a/numpy/compat/py3k.py b/numpy/compat/py3k.py
index c9ed9d52c..fd9f8bd42 100644
--- a/numpy/compat/py3k.py
+++ b/numpy/compat/py3k.py
@@ -1,9 +1,15 @@
"""
-Python 3 compatibility tools.
+Python 3.X compatibility tools.
-"""
-from __future__ import division, absolute_import, print_function
+While this file was originally intended for Python 2 -> 3 transition,
+it is now used to create a compatibility layer between different
+minor versions of Python 3.
+While the active version of numpy may not support a given version of python, we
+allow downstream libraries to continue to use these shims for forward
+compatibility with numpy while they transition their code to newer versions of
+Python.
+"""
__all__ = ['bytes', 'asbytes', 'isfileobj', 'getexception', 'strchar',
'unicode', 'asunicode', 'asbytes_nested', 'asunicode_nested',
'asstr', 'open_latin1', 'long', 'basestring', 'sixu',
@@ -12,76 +18,48 @@ __all__ = ['bytes', 'asbytes', 'isfileobj', 'getexception', 'strchar',
import sys
import os
-try:
- from pathlib import Path, PurePath
-except ImportError:
- Path = PurePath = None
-
-if sys.version_info[0] >= 3:
- import io
-
- try:
- import pickle5 as pickle
- except ImportError:
- import pickle
-
- long = int
- integer_types = (int,)
- basestring = str
- unicode = str
- bytes = bytes
-
- def asunicode(s):
- if isinstance(s, bytes):
- return s.decode('latin1')
- return str(s)
+from pathlib import Path, PurePath
+import io
- def asbytes(s):
- if isinstance(s, bytes):
- return s
- return str(s).encode('latin1')
+import abc
+from abc import ABC as abc_ABC
- def asstr(s):
- if isinstance(s, bytes):
- return s.decode('latin1')
- return str(s)
+try:
+ import pickle5 as pickle
+except ImportError:
+ import pickle
- def isfileobj(f):
- return isinstance(f, (io.FileIO, io.BufferedReader, io.BufferedWriter))
+long = int
+integer_types = (int,)
+basestring = str
+unicode = str
+bytes = bytes
- def open_latin1(filename, mode='r'):
- return open(filename, mode=mode, encoding='iso-8859-1')
+def asunicode(s):
+ if isinstance(s, bytes):
+ return s.decode('latin1')
+ return str(s)
- def sixu(s):
+def asbytes(s):
+ if isinstance(s, bytes):
return s
+ return str(s).encode('latin1')
- strchar = 'U'
+def asstr(s):
+ if isinstance(s, bytes):
+ return s.decode('latin1')
+ return str(s)
-else:
- import cpickle as pickle
+def isfileobj(f):
+ return isinstance(f, (io.FileIO, io.BufferedReader, io.BufferedWriter))
- bytes = str
- long = long
- basestring = basestring
- unicode = unicode
- integer_types = (int, long)
- asbytes = str
- asstr = str
- strchar = 'S'
+def open_latin1(filename, mode='r'):
+ return open(filename, mode=mode, encoding='iso-8859-1')
- def isfileobj(f):
- return isinstance(f, file)
+def sixu(s):
+ return s
- def asunicode(s):
- if isinstance(s, unicode):
- return s
- return str(s).decode('ascii')
-
- def open_latin1(filename, mode='r'):
- return open(filename, mode=mode)
-
- def sixu(s):
- return unicode(s, 'unicode_escape')
+strchar = 'U'
def getexception():
return sys.exc_info()[1]
@@ -107,7 +85,7 @@ def is_pathlib_path(obj):
return Path is not None and isinstance(obj, Path)
# from Python 3.7
-class contextlib_nullcontext(object):
+class contextlib_nullcontext:
"""Context manager that does no additional processing.
Used as a stand-in for a normal context manager, when a particular
@@ -128,69 +106,30 @@ class contextlib_nullcontext(object):
pass
-if sys.version_info[0] >= 3 and sys.version_info[1] >= 4:
- def npy_load_module(name, fn, info=None):
- """
- Load a module.
-
- .. versionadded:: 1.11.2
-
- Parameters
- ----------
- name : str
- Full module name.
- fn : str
- Path to module file.
- info : tuple, optional
- Only here for backward compatibility with Python 2.*.
-
- Returns
- -------
- mod : module
-
- """
- import importlib.machinery
- return importlib.machinery.SourceFileLoader(name, fn).load_module()
-else:
- def npy_load_module(name, fn, info=None):
- """
- Load a module.
-
- .. versionadded:: 1.11.2
+def npy_load_module(name, fn, info=None):
+ """
+ Load a module.
- Parameters
- ----------
- name : str
- Full module name.
- fn : str
- Path to module file.
- info : tuple, optional
- Information as returned by `imp.find_module`
- (suffix, mode, type).
+ .. versionadded:: 1.11.2
- Returns
- -------
- mod : module
+ Parameters
+ ----------
+ name : str
+ Full module name.
+ fn : str
+ Path to module file.
+ info : tuple, optional
+ Only here for backward compatibility with Python 2.*.
- """
- import imp
- if info is None:
- path = os.path.dirname(fn)
- fo, fn, info = imp.find_module(name, [path])
- else:
- fo = open(fn, info[1])
- try:
- mod = imp.load_module(name, fo, fn, info)
- finally:
- fo.close()
- return mod
+ Returns
+ -------
+ mod : module
-# backport abc.ABC
-import abc
-if sys.version_info[:2] >= (3, 4):
- abc_ABC = abc.ABC
-else:
- abc_ABC = abc.ABCMeta('ABC', (object,), {'__slots__': ()})
+ """
+ # Explicitly lazy import this to avoid paying the cost
+ # of importing importlib at startup
+ from importlib.machinery import SourceFileLoader
+ return SourceFileLoader(name, fn).load_module()
# Backport os.fs_path, os.PathLike, and PurePath.__fspath__
diff --git a/numpy/compat/setup.py b/numpy/compat/setup.py
index 882857428..afa511673 100644
--- a/numpy/compat/setup.py
+++ b/numpy/compat/setup.py
@@ -1,5 +1,3 @@
-from __future__ import division, print_function
-
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
diff --git a/numpy/compat/tests/test_compat.py b/numpy/compat/tests/test_compat.py
index 1543aafaf..2b8acbaa0 100644
--- a/numpy/compat/tests/test_compat.py
+++ b/numpy/compat/tests/test_compat.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
from os.path import join
from numpy.compat import isfileobj
diff --git a/numpy/conftest.py b/numpy/conftest.py
index 18d5d1ce9..1d3e0349f 100644
--- a/numpy/conftest.py
+++ b/numpy/conftest.py
@@ -1,8 +1,9 @@
"""
Pytest configuration and fixtures for the Numpy test suite.
"""
-from __future__ import division, absolute_import, print_function
+import os
+import hypothesis
import pytest
import numpy
@@ -12,6 +13,12 @@ from numpy.core._multiarray_tests import get_fpu_mode
_old_fpu_mode = None
_collect_results = {}
+# See https://hypothesis.readthedocs.io/en/latest/settings.html
+hypothesis.settings.register_profile(
+ name="numpy-profile", deadline=None, print_blob=True,
+)
+hypothesis.settings.load_profile("numpy-profile")
+
def pytest_configure(config):
config.addinivalue_line("markers",
@@ -22,6 +29,22 @@ def pytest_configure(config):
"slow: Tests that are very slow.")
+def pytest_addoption(parser):
+    parser.addoption("--available-memory", action="store", default=None,
+                     help=("Set amount of memory available for running the "
+                           "test suite. This can result in tests requiring "
+                           "especially large amounts of memory to be skipped. "
+                           "Equivalent to setting environment variable "
+                           "NPY_AVAILABLE_MEM. Default: determined "
+                           "automatically."))
+
+
+def pytest_sessionstart(session):
+ available_mem = session.config.getoption('available_memory')
+ if available_mem is not None:
+ os.environ['NPY_AVAILABLE_MEM'] = available_mem
+
+
#FIXME when yield tests are gone.
@pytest.hookimpl()
def pytest_itemcollected(item):
diff --git a/numpy/core/__init__.py b/numpy/core/__init__.py
index c3b3f0392..c2d53fe3e 100644
--- a/numpy/core/__init__.py
+++ b/numpy/core/__init__.py
@@ -6,8 +6,6 @@ are available in the main ``numpy`` namespace - use that instead.
"""
-from __future__ import division, absolute_import, print_function
-
from numpy.version import version as __version__
import os
@@ -137,16 +135,11 @@ def _ufunc_reduce(func):
return _ufunc_reconstruct, (whichmodule(func, name), name)
-import sys
-if sys.version_info[0] >= 3:
- import copyreg
-else:
- import copy_reg as copyreg
+import copyreg
copyreg.pickle(ufunc, _ufunc_reduce, _ufunc_reconstruct)
# Unclutter namespace (must keep _ufunc_reconstruct for unpickling)
del copyreg
-del sys
del _ufunc_reduce
from numpy._pytesttester import PytestTester
diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py
index dbe3d226f..18ab10078 100644
--- a/numpy/core/_add_newdocs.py
+++ b/numpy/core/_add_newdocs.py
@@ -8,9 +8,6 @@ NOTE: Many of the methods of ndarray have corresponding functions.
core/fromnumeric.py, core/defmatrix.py up-to-date.
"""
-from __future__ import division, absolute_import, print_function
-
-import sys
from numpy.core import numerictypes as _numerictypes
from numpy.core import dtype
@@ -155,6 +152,8 @@ add_newdoc('numpy.core', 'flatiter', ('copy',
add_newdoc('numpy.core', 'nditer',
"""
+ nditer(op, flags=None, op_flags=None, op_dtypes=None, order='K', casting='safe', op_axes=None, itershape=None, buffersize=0)
+
Efficient multi-dimensional iterator object to iterate over arrays.
To get started using this object, see the
:ref:`introductory guide to array iteration <arrays.nditer>`.
@@ -787,7 +786,7 @@ add_newdoc('numpy.core', 'broadcast', ('reset',
add_newdoc('numpy.core.multiarray', 'array',
"""
- array(object, dtype=None, copy=True, order='K', subok=False, ndmin=0)
+ array(object, dtype=None, *, copy=True, order='K', subok=False, ndmin=0)
Create an array.
@@ -1036,7 +1035,12 @@ add_newdoc('numpy.core.multiarray', 'fromstring',
A string containing the data.
dtype : data-type, optional
The data type of the array; default: float. For binary input data,
- the data must be in exactly this format.
+ the data must be in exactly this format. Most builtin numeric types are
+ supported and extension types may be supported.
+
+ .. versionadded:: 1.18.0
+ Complex dtypes.
+
count : int, optional
Read this number of `dtype` elements from the data. If this is
negative (the default), the count will be determined from the
@@ -1172,6 +1176,11 @@ add_newdoc('numpy.core.multiarray', 'fromfile',
Data type of the returned array.
For binary files, it is used to determine the size and byte-order
of the items in the file.
+ Most builtin numeric types are supported and extension types may be supported.
+
+ .. versionadded:: 1.18.0
+ Complex dtypes.
+
count : int
Number of items to read. ``-1`` means all items (i.e., the complete
file).
@@ -1196,7 +1205,7 @@ add_newdoc('numpy.core.multiarray', 'fromfile',
Notes
-----
Do not rely on the combination of `tofile` and `fromfile` for
- data storage, as the binary files generated are are not platform
+ data storage, as the binary files generated are not platform
independent. In particular, no byte-order or data-type information is
saved. Data can be stored in the platform independent ``.npy`` format
using `save` and `load` instead.
@@ -1326,9 +1335,9 @@ add_newdoc('numpy.core.multiarray', 'arange',
See Also
--------
- linspace : Evenly spaced numbers with careful handling of endpoints.
- ogrid: Arrays of evenly spaced numbers in N-dimensions.
- mgrid: Grid-shaped arrays of evenly spaced numbers in N-dimensions.
+ numpy.linspace : Evenly spaced numbers with careful handling of endpoints.
+ numpy.ogrid: Arrays of evenly spaced numbers in N-dimensions.
+ numpy.mgrid: Grid-shaped arrays of evenly spaced numbers in N-dimensions.
Examples
--------
@@ -1476,59 +1485,6 @@ add_newdoc('numpy.core.multiarray', 'promote_types',
""")
-if sys.version_info.major < 3:
- add_newdoc('numpy.core.multiarray', 'newbuffer',
- """
- newbuffer(size)
-
- Return a new uninitialized buffer object.
-
- Parameters
- ----------
- size : int
- Size in bytes of returned buffer object.
-
- Returns
- -------
- newbuffer : buffer object
- Returned, uninitialized buffer object of `size` bytes.
-
- """)
-
- add_newdoc('numpy.core.multiarray', 'getbuffer',
- """
- getbuffer(obj [,offset[, size]])
-
- Create a buffer object from the given object referencing a slice of
- length size starting at offset.
-
- Default is the entire buffer. A read-write buffer is attempted followed
- by a read-only buffer.
-
- Parameters
- ----------
- obj : object
-
- offset : int, optional
-
- size : int, optional
-
- Returns
- -------
- buffer_obj : buffer
-
- Examples
- --------
- >>> buf = np.getbuffer(np.ones(5), 1, 3)
- >>> len(buf)
- 3
- >>> buf[0]
- '\\x00'
- >>> buf
- <read-write buffer for 0x8af1e70, size 3, offset 1 at 0x8ba4ec0>
-
- """)
-
add_newdoc('numpy.core.multiarray', 'c_einsum',
"""
c_einsum(subscripts, *operands, out=None, dtype=None, order='K',
@@ -2071,25 +2027,22 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('ctypes',
Examples
--------
>>> import ctypes
+ >>> x = np.array([[0, 1], [2, 3]], dtype=np.int32)
>>> x
array([[0, 1],
- [2, 3]])
+ [2, 3]], dtype=int32)
>>> x.ctypes.data
- 30439712
- >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_long))
- <ctypes.LP_c_long object at 0x01F01300>
- >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_long)).contents
- c_long(0)
- >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_longlong)).contents
- c_longlong(4294967296L)
+ 31962608 # may vary
+ >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_uint32))
+ <__main__.LP_c_uint object at 0x7ff2fc1fc200> # may vary
+ >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_uint32)).contents
+ c_uint(0)
+ >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_uint64)).contents
+ c_ulong(4294967296)
>>> x.ctypes.shape
- <numpy.core._internal.c_long_Array_2 object at 0x01FFD580>
- >>> x.ctypes.shape_as(ctypes.c_long)
- <numpy.core._internal.c_long_Array_2 object at 0x01FCE620>
+ <numpy.core._internal.c_long_Array_2 object at 0x7ff2fc1fce60> # may vary
>>> x.ctypes.strides
- <numpy.core._internal.c_long_Array_2 object at 0x01FCE620>
- >>> x.ctypes.strides_as(ctypes.c_longlong)
- <numpy.core._internal.c_longlong_Array_2 object at 0x01F01300>
+ <numpy.core._internal.c_long_Array_2 object at 0x7ff2fc1ff320> # may vary
"""))
@@ -2363,7 +2316,8 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('shape',
>>> np.zeros((4,2))[::2].shape = (-1,)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
- AttributeError: incompatible shape for a non-contiguous array
+ AttributeError: Incompatible shape for in-place modification. Use
+ `.reshape()` to make a copy with the desired shape.
See Also
--------
@@ -2497,7 +2451,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('T',
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array__',
- """ a.__array__(|dtype) -> reference if type unchanged, copy otherwise.
+ """ a.__array__([dtype], /) -> reference if type unchanged, copy otherwise.
Returns either a new reference to self if dtype is not given or a new array
of provided data type if dtype is different from the current dtype of the
@@ -3706,10 +3660,10 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('sort',
See Also
--------
numpy.sort : Return a sorted copy of an array.
- argsort : Indirect sort.
- lexsort : Indirect stable sort on multiple keys.
- searchsorted : Find elements in sorted array.
- partition: Partial sort.
+ numpy.argsort : Indirect sort.
+ numpy.lexsort : Indirect stable sort on multiple keys.
+ numpy.searchsorted : Find elements in sorted array.
+ numpy.partition: Partial sort.
Notes
-----
@@ -3943,15 +3897,22 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('tolist',
Examples
--------
- For a 1D array, ``a.tolist()`` is almost the same as ``list(a)``:
+ For a 1D array, ``a.tolist()`` is almost the same as ``list(a)``,
+ except that ``tolist`` changes numpy scalars to Python scalars:
- >>> a = np.array([1, 2])
- >>> list(a)
+ >>> a = np.uint32([1, 2])
+ >>> a_list = list(a)
+ >>> a_list
[1, 2]
- >>> a.tolist()
+ >>> type(a_list[0])
+ <class 'numpy.uint32'>
+ >>> a_tolist = a.tolist()
+ >>> a_tolist
[1, 2]
+ >>> type(a_tolist[0])
+ <class 'int'>
- However, for a 2D array, ``tolist`` applies recursively:
+ Additionally, for a 2D array, ``tolist`` applies recursively:
>>> a = np.array([[1, 2], [3, 4]])
>>> list(a)
@@ -3971,8 +3932,8 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('tolist',
"""))
-tobytesdoc = """
- a.{name}(order='C')
+add_newdoc('numpy.core.multiarray', 'ndarray', ('tobytes', """
+ a.tobytes(order='C')
Construct Python bytes containing the raw data bytes in the array.
@@ -3982,11 +3943,11 @@ tobytesdoc = """
unless the F_CONTIGUOUS flag in the array is set, in which case it
means 'Fortran' order.
- {deprecated}
+ .. versionadded:: 1.9.0
Parameters
----------
- order : {{'C', 'F', None}}, optional
+ order : {'C', 'F', None}, optional
Order of the data for multidimensional arrays:
C, Fortran, or the same as for the original array.
@@ -4005,18 +3966,19 @@ tobytesdoc = """
>>> x.tobytes('F')
b'\\x00\\x00\\x02\\x00\\x01\\x00\\x03\\x00'
- """
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('tostring', r"""
+ a.tostring(order='C')
+
+ A compatibility alias for `tobytes`, with exactly the same behavior.
+
+ Despite its name, it returns `bytes` not `str`\ s.
+
+ .. deprecated:: 1.19.0
+ """))
-add_newdoc('numpy.core.multiarray', 'ndarray',
- ('tostring', tobytesdoc.format(name='tostring',
- deprecated=
- 'This function is a compatibility '
- 'alias for tobytes. Despite its '
- 'name it returns bytes not '
- 'strings.')))
-add_newdoc('numpy.core.multiarray', 'ndarray',
- ('tobytes', tobytesdoc.format(name='tobytes',
- deprecated='.. versionadded:: 1.9.0')))
add_newdoc('numpy.core.multiarray', 'ndarray', ('trace',
"""
@@ -4107,21 +4069,26 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('var',
add_newdoc('numpy.core.multiarray', 'ndarray', ('view',
"""
- a.view(dtype=None, type=None)
+ a.view([dtype][, type])
New view of array with the same data.
+ .. note::
+ Passing None for ``dtype`` is different from omitting the parameter,
+ since the former invokes ``dtype(None)`` which is an alias for
+ ``dtype('float_')``.
+
Parameters
----------
dtype : data-type or ndarray sub-class, optional
- Data-type descriptor of the returned view, e.g., float32 or int16. The
- default, None, results in the view having the same data-type as `a`.
+ Data-type descriptor of the returned view, e.g., float32 or int16.
+ Omitting it results in the view having the same data-type as `a`.
This argument can also be specified as an ndarray sub-class, which
then specifies the type of the returned object (this is equivalent to
setting the ``type`` parameter).
type : Python type, optional
- Type of the returned view, e.g., ndarray or matrix. Again, the
- default None results in type preservation.
+ Type of the returned view, e.g., ndarray or matrix. Again, omission
+ of the parameter results in type preservation.
Notes
-----
@@ -4213,7 +4180,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('view',
add_newdoc('numpy.core.umath', 'frompyfunc',
"""
- frompyfunc(func, nin, nout)
+ frompyfunc(func, nin, nout, *[, identity])
Takes an arbitrary Python function and returns a NumPy ufunc.
@@ -4228,6 +4195,13 @@ add_newdoc('numpy.core.umath', 'frompyfunc',
The number of input arguments.
nout : int
The number of objects returned by `func`.
+ identity : object, optional
+ The value to use for the `~numpy.ufunc.identity` attribute of the resulting
+ object. If specified, this is equivalent to setting the underlying
+ C ``identity`` field to ``PyUFunc_IdentityValue``.
+ If omitted, the identity is set to ``PyUFunc_None``. Note that this is
+ _not_ equivalent to setting the identity to ``None``, which implies the
+ operation is reorderable.
Returns
-------
@@ -4236,7 +4210,7 @@ add_newdoc('numpy.core.umath', 'frompyfunc',
See Also
--------
- vectorize : evaluates pyfunc over input arrays using broadcasting rules of numpy
+ vectorize : Evaluates pyfunc over input arrays using broadcasting rules of numpy.
Notes
-----
@@ -4497,7 +4471,7 @@ add_newdoc('numpy.core', 'ufunc',
Alternate array object(s) in which to put the result; if provided, it
must have a shape that the inputs broadcast to. A tuple of arrays
(possible only as a keyword argument) must have length equal to the
- number of outputs; use `None` for uninitialized outputs to be
+ number of outputs; use None for uninitialized outputs to be
allocated by the ufunc.
where : array_like, optional
This condition is broadcast over the input. At locations where the
@@ -4691,7 +4665,7 @@ add_newdoc('numpy.core', 'ufunc', ('signature',
-----
Generalized ufuncs are used internally in many linalg functions, and in
the testing suite; the examples below are taken from these.
- For ufuncs that operate on scalars, the signature is `None`, which is
+ For ufuncs that operate on scalars, the signature is None, which is
equivalent to '()' for every argument.
Examples
@@ -4742,7 +4716,7 @@ add_newdoc('numpy.core', 'ufunc', ('reduce',
.. versionadded:: 1.7.0
- If this is `None`, a reduction is performed over all the axes.
+ If this is None, a reduction is performed over all the axes.
If this is a tuple of ints, a reduction is performed on multiple
axes, instead of a single axis or all the axes as before.
@@ -4755,7 +4729,7 @@ add_newdoc('numpy.core', 'ufunc', ('reduce',
to the data-type of the output array if this is provided, or
the data-type of the input array if no output array is provided.
out : ndarray, None, or tuple of ndarray and None, optional
- A location into which the result is stored. If not provided or `None`,
+ A location into which the result is stored. If not provided or None,
a freshly-allocated array is returned. For consistency with
``ufunc.__call__``, if given as a keyword, this may be wrapped in a
1-element tuple.
@@ -4872,7 +4846,7 @@ add_newdoc('numpy.core', 'ufunc', ('accumulate',
         to the data-type of the output array if such is provided, or the
         data-type of the input array if no output array is provided.
out : ndarray, None, or tuple of ndarray and None, optional
- A location into which the result is stored. If not provided or `None`,
+ A location into which the result is stored. If not provided or None,
a freshly-allocated array is returned. For consistency with
``ufunc.__call__``, if given as a keyword, this may be wrapped in a
1-element tuple.
@@ -4954,7 +4928,7 @@ add_newdoc('numpy.core', 'ufunc', ('reduceat',
to the data type of the output array if this is provided, or
the data type of the input array if no output array is provided.
out : ndarray, None, or tuple of ndarray and None, optional
- A location into which the result is stored. If not provided or `None`,
+ A location into which the result is stored. If not provided or None,
a freshly-allocated array is returned. For consistency with
``ufunc.__call__``, if given as a keyword, this may be wrapped in a
1-element tuple.
@@ -5327,7 +5301,8 @@ add_newdoc('numpy.core.multiarray', 'dtype', ('descr',
`__array_interface__` attribute.
Warning: This attribute exists specifically for `__array_interface__`,
- and is not a datatype description compatible with `np.dtype`.
+ and passing it directly to `np.dtype` will not accurately reconstruct
+ some dtypes (e.g., scalar and subarray dtypes).
Examples
--------
@@ -6853,4 +6828,3 @@ for float_name in ('half', 'single', 'double', 'longdouble'):
>>> np.{ftype}(-.25).as_integer_ratio()
(-1, 4)
""".format(ftype=float_name)))
-
diff --git a/numpy/core/_asarray.py b/numpy/core/_asarray.py
index 0ad4161f4..df569f22d 100644
--- a/numpy/core/_asarray.py
+++ b/numpy/core/_asarray.py
@@ -3,8 +3,6 @@ Functions in the ``as*array`` family that promote array-likes into arrays.
`require` fits this category despite its name not matching this pattern.
"""
-from __future__ import division, absolute_import, print_function
-
from .overrides import set_module
from .multiarray import array
diff --git a/numpy/core/_dtype.py b/numpy/core/_dtype.py
index df1ff180e..6b0ec5903 100644
--- a/numpy/core/_dtype.py
+++ b/numpy/core/_dtype.py
@@ -3,10 +3,6 @@ A place for code to be called from the implementation of np.dtype
String handling is much easier to do correctly in python.
"""
-from __future__ import division, absolute_import, print_function
-
-import sys
-
import numpy as np
@@ -19,18 +15,10 @@ _kind_to_stem = {
'V': 'void',
'O': 'object',
'M': 'datetime',
- 'm': 'timedelta'
+ 'm': 'timedelta',
+ 'S': 'bytes',
+ 'U': 'str',
}
-if sys.version_info[0] >= 3:
- _kind_to_stem.update({
- 'S': 'bytes',
- 'U': 'str'
- })
-else:
- _kind_to_stem.update({
- 'S': 'string',
- 'U': 'unicode'
- })
def _kind_name(dtype):
diff --git a/numpy/core/_exceptions.py b/numpy/core/_exceptions.py
index 88a45561f..99172e23d 100644
--- a/numpy/core/_exceptions.py
+++ b/numpy/core/_exceptions.py
@@ -157,7 +157,6 @@ class _ArrayMemoryError(MemoryError):
@staticmethod
def _size_to_string(num_bytes):
""" Convert a number of bytes into a binary size string """
- import math
# https://en.wikipedia.org/wiki/Binary_prefix
LOG2_STEP = 10
diff --git a/numpy/core/_internal.py b/numpy/core/_internal.py
index b0ea603e1..1378497bb 100644
--- a/numpy/core/_internal.py
+++ b/numpy/core/_internal.py
@@ -4,13 +4,11 @@ A place for internal code
Some things are more easily handled Python.
"""
-from __future__ import division, absolute_import, print_function
-
+import ast
import re
import sys
import platform
-from numpy.compat import unicode
from .multiarray import dtype, array, ndarray
try:
import ctypes
@@ -20,9 +18,9 @@ except ImportError:
IS_PYPY = platform.python_implementation() == 'PyPy'
if (sys.byteorder == 'little'):
- _nbo = b'<'
+ _nbo = '<'
else:
- _nbo = b'>'
+ _nbo = '>'
def _makenames_list(adict, align):
allfields = []
@@ -145,16 +143,16 @@ def _reconstruct(subtype, shape, dtype):
# format_re was originally from numarray by J. Todd Miller
-format_re = re.compile(br'(?P<order1>[<>|=]?)'
- br'(?P<repeats> *[(]?[ ,0-9]*[)]? *)'
- br'(?P<order2>[<>|=]?)'
- br'(?P<dtype>[A-Za-z0-9.?]*(?:\[[a-zA-Z0-9,.]+\])?)')
-sep_re = re.compile(br'\s*,\s*')
-space_re = re.compile(br'\s+$')
+format_re = re.compile(r'(?P<order1>[<>|=]?)'
+ r'(?P<repeats> *[(]?[ ,0-9]*[)]? *)'
+ r'(?P<order2>[<>|=]?)'
+ r'(?P<dtype>[A-Za-z0-9.?]*(?:\[[a-zA-Z0-9,.]+\])?)')
+sep_re = re.compile(r'\s*,\s*')
+space_re = re.compile(r'\s+$')
# astr is a string (perhaps comma separated)
-_convorder = {b'=': _nbo}
+_convorder = {'=': _nbo}
def _commastring(astr):
startindex = 0
@@ -179,9 +177,9 @@ def _commastring(astr):
(len(result)+1, astr))
startindex = mo.end()
- if order2 == b'':
+ if order2 == '':
order = order1
- elif order1 == b'':
+ elif order1 == '':
order = order2
else:
order1 = _convorder.get(order1, order1)
@@ -192,18 +190,18 @@ def _commastring(astr):
(order1, order2))
order = order1
- if order in [b'|', b'=', _nbo]:
- order = b''
+ if order in ['|', '=', _nbo]:
+ order = ''
dtype = order + dtype
- if (repeats == b''):
+ if (repeats == ''):
newitem = dtype
else:
- newitem = (dtype, eval(repeats))
+ newitem = (dtype, ast.literal_eval(repeats))
result.append(newitem)
return result
-class dummy_ctype(object):
+class dummy_ctype:
def __init__(self, cls):
self._cls = cls
def __mul__(self, other):
@@ -238,64 +236,22 @@ _getintp_ctype.cache = None
# Used for .ctypes attribute of ndarray
-class _missing_ctypes(object):
+class _missing_ctypes:
def cast(self, num, obj):
return num.value
- class c_void_p(object):
+ class c_void_p:
def __init__(self, ptr):
self.value = ptr
-class _unsafe_first_element_pointer(object):
- """
- Helper to allow viewing an array as a ctypes pointer to the first element
-
- This avoids:
- * dealing with strides
- * `.view` rejecting object-containing arrays
- * `memoryview` not supporting overlapping fields
- """
- def __init__(self, arr):
- self.base = arr
-
- @property
- def __array_interface__(self):
- i = dict(
- shape=(),
- typestr='|V0',
- data=(self.base.__array_interface__['data'][0], False),
- strides=(),
- version=3,
- )
- return i
-
-
-def _get_void_ptr(arr):
- """
- Get a `ctypes.c_void_p` to arr.data, that keeps a reference to the array
- """
- import numpy as np
- # convert to a 0d array that has a data pointer referrign to the start
- # of arr. This holds a reference to arr.
- simple_arr = np.asarray(_unsafe_first_element_pointer(arr))
-
- # create a `char[0]` using the same memory.
- c_arr = (ctypes.c_char * 0).from_buffer(simple_arr)
-
- # finally cast to void*
- return ctypes.cast(ctypes.pointer(c_arr), ctypes.c_void_p)
-
-
-class _ctypes(object):
+class _ctypes:
def __init__(self, array, ptr=None):
self._arr = array
if ctypes:
self._ctypes = ctypes
- # get a void pointer to the buffer, which keeps the array alive
- self._data = _get_void_ptr(array)
- assert self._data.value == ptr
+ self._data = self._ctypes.c_void_p(ptr)
else:
# fake a pointer-like object that holds onto the reference
self._ctypes = _missing_ctypes()
@@ -317,7 +273,14 @@ class _ctypes(object):
The returned pointer will keep a reference to the array.
"""
- return self._ctypes.cast(self._data, obj)
+ # _ctypes.cast function causes a circular reference of self._data in
+ # self._data._objects. Attributes of self._data cannot be released
+ # until gc.collect is called. Make a copy of the pointer first then let
+ # it hold the array reference. This is a workaround to circumvent the
+ # CPython bug https://bugs.python.org/issue12836
+ ptr = self._ctypes.cast(self._data, obj)
+ ptr._arr = self._arr
+ return ptr
def shape_as(self, obj):
"""
@@ -348,7 +311,7 @@ class _ctypes(object):
crashing. User Beware! The value of this attribute is exactly the same
as ``self._array_interface_['data'][0]``.
- Note that unlike `data_as`, a reference will not be kept to the array:
+ Note that unlike ``data_as``, a reference will not be kept to the array:
code like ``ctypes.c_void_p((a + b).ctypes.data)`` will result in a
pointer to a deallocated array, and should be spelt
``(a + b).ctypes.data_as(ctypes.c_void_p)``
@@ -385,7 +348,7 @@ class _ctypes(object):
Enables `c_func(some_array.ctypes)`
"""
- return self._data
+ return self.data_as(ctypes.c_void_p)
# kept for compatibility
get_data = data.fget
@@ -401,7 +364,7 @@ def _newnames(datatype, order):
"""
oldnames = datatype.names
nameslist = list(oldnames)
- if isinstance(order, (str, unicode)):
+ if isinstance(order, str):
order = [order]
seen = set()
if isinstance(order, (list, tuple)):
@@ -558,7 +521,7 @@ _pep3118_unsupported_map = {
'X': 'function pointers',
}
-class _Stream(object):
+class _Stream:
def __init__(self, s):
self.s = s
self.byteorder = '@'
@@ -592,7 +555,6 @@ class _Stream(object):
def __bool__(self):
return bool(self.s)
- __nonzero__ = __bool__
def _dtype_from_pep3118(spec):
@@ -875,12 +837,12 @@ def npy_ctypes_check(cls):
# # (..., _ctypes._CData, object)
ctype_base = cls.__mro__[-2]
# right now, they're part of the _ctypes module
- return 'ctypes' in ctype_base.__module__
+ return '_ctypes' in ctype_base.__module__
except Exception:
return False
-class recursive(object):
+class recursive:
'''
A decorator class for recursive nested functions.
Naive recursive nested functions hold a reference to themselves:
diff --git a/numpy/core/_methods.py b/numpy/core/_methods.py
index 269e509b8..86ddf4d17 100644
--- a/numpy/core/_methods.py
+++ b/numpy/core/_methods.py
@@ -3,8 +3,6 @@ Array methods which are called by both the C-code for the method
and the Python code for the NumPy-namespace function
"""
-from __future__ import division, absolute_import, print_function
-
import warnings
from numpy.core import multiarray as mu
@@ -23,6 +21,17 @@ umr_prod = um.multiply.reduce
umr_any = um.logical_or.reduce
umr_all = um.logical_and.reduce
+# Complex types to -> (2,)float view for fast-path computation in _var()
+_complex_to_float = {
+ nt.dtype(nt.csingle) : nt.dtype(nt.single),
+ nt.dtype(nt.cdouble) : nt.dtype(nt.double),
+}
+# Special case for windows: ensure double takes precedence
+if nt.dtype(nt.longdouble) != nt.dtype(nt.double):
+ _complex_to_float.update({
+ nt.dtype(nt.clongdouble) : nt.dtype(nt.longdouble),
+ })
+
# avoid keyword arguments to speed up parsing, saves about 15%-20% for very
# small reductions
def _amax(a, axis=None, out=None, keepdims=False,
@@ -54,7 +63,7 @@ def _count_reduce_items(arr, axis):
axis = (axis,)
items = 1
for ax in axis:
- items *= arr.shape[ax]
+ items *= arr.shape[mu.normalize_axis_index(ax, arr.ndim)]
return items
# Numpy 1.17.0, 2019-02-24
@@ -191,8 +200,16 @@ def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
# Note that x may not be inexact and that we need it to be an array,
# not a scalar.
x = asanyarray(arr - arrmean)
+
if issubclass(arr.dtype.type, (nt.floating, nt.integer)):
x = um.multiply(x, x, out=x)
+ # Fast-paths for built-in complex types
+ elif x.dtype in _complex_to_float:
+ xv = x.view(dtype=(_complex_to_float[x.dtype], (2,)))
+ um.multiply(xv, xv, out=xv)
+ x = um.add(xv[..., 0], xv[..., 1], out=x.real).real
+ # Most general case; includes handling object arrays containing imaginary
+ # numbers and complex types with non-native byteorder
else:
x = um.multiply(x, um.conjugate(x), out=x).real
diff --git a/numpy/core/_type_aliases.py b/numpy/core/_type_aliases.py
index d6e1a1fb7..c26431443 100644
--- a/numpy/core/_type_aliases.py
+++ b/numpy/core/_type_aliases.py
@@ -23,7 +23,6 @@ and sometimes other mappings too.
"""
import warnings
-import sys
from numpy.compat import unicode
from numpy._globals import VisibleDeprecationWarning
@@ -203,22 +202,16 @@ def _set_up_aliases():
('bool_', 'bool'),
('bytes_', 'string'),
('string_', 'string'),
+ ('str_', 'unicode'),
('unicode_', 'unicode'),
('object_', 'object')]
- if sys.version_info[0] >= 3:
- type_pairs.extend([('str_', 'unicode')])
- else:
- type_pairs.extend([('str_', 'string')])
for alias, t in type_pairs:
allTypes[alias] = allTypes[t]
sctypeDict[alias] = sctypeDict[t]
# Remove aliases overriding python types and modules
to_remove = ['ulong', 'object', 'int', 'float',
- 'complex', 'bool', 'string', 'datetime', 'timedelta']
- if sys.version_info[0] >= 3:
- to_remove.extend(['bytes', 'str'])
- else:
- to_remove.extend(['unicode', 'long'])
+ 'complex', 'bool', 'string', 'datetime', 'timedelta',
+ 'bytes', 'str']
for t in to_remove:
try:
@@ -267,11 +260,8 @@ _set_array_types()
# Add additional strings to the sctypeDict
-_toadd = ['int', 'float', 'complex', 'bool', 'object']
-if sys.version_info[0] >= 3:
- _toadd.extend(['str', 'bytes', ('a', 'bytes_')])
-else:
- _toadd.extend(['string', ('str', 'string_'), 'unicode', ('a', 'string_')])
+_toadd = ['int', 'float', 'complex', 'bool', 'object',
+ 'str', 'bytes', ('a', 'bytes_')]
for name in _toadd:
if isinstance(name, tuple):
diff --git a/numpy/core/_ufunc_config.py b/numpy/core/_ufunc_config.py
index c3951cc09..454d911cf 100644
--- a/numpy/core/_ufunc_config.py
+++ b/numpy/core/_ufunc_config.py
@@ -3,14 +3,7 @@ Functions for changing global ufunc configuration
This provides helpers which wrap `umath.geterrobj` and `umath.seterrobj`
"""
-from __future__ import division, absolute_import, print_function
-
-try:
- # Accessing collections abstract classes from collections
- # has been deprecated since Python 3.3
- import collections.abc as collections_abc
-except ImportError:
- import collections as collections_abc
+import collections.abc
import contextlib
from .overrides import set_module
@@ -290,7 +283,7 @@ def seterrcall(func):
Log error message:
- >>> class Log(object):
+ >>> class Log:
... def write(self, msg):
... print("LOG: %s" % msg)
...
@@ -309,8 +302,9 @@ def seterrcall(func):
OrderedDict([('divide', 'log'), ('invalid', 'log'), ('over', 'log'), ('under', 'log')])
"""
- if func is not None and not isinstance(func, collections_abc.Callable):
- if not hasattr(func, 'write') or not isinstance(func.write, collections_abc.Callable):
+ if func is not None and not isinstance(func, collections.abc.Callable):
+ if (not hasattr(func, 'write') or
+ not isinstance(func.write, collections.abc.Callable)):
raise ValueError("Only callable can be used as callback")
pyvals = umath.geterrobj()
old = geterrcall()
@@ -365,7 +359,7 @@ def geterrcall():
return umath.geterrobj()[2]
-class _unspecified(object):
+class _unspecified:
pass
@@ -431,11 +425,9 @@ class errstate(contextlib.ContextDecorator):
OrderedDict([('divide', 'ignore'), ('invalid', 'ignore'), ('over', 'ignore'), ('under', 'ignore')])
"""
- # Note that we don't want to run the above doctests because they will fail
- # without a from __future__ import with_statement
- def __init__(self, **kwargs):
- self.call = kwargs.pop('call', _Unspecified)
+ def __init__(self, *, call=_Unspecified, **kwargs):
+ self.call = call
self.kwargs = kwargs
def __enter__(self):
diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py
index 8a7626d9d..456ef76f0 100644
--- a/numpy/core/arrayprint.py
+++ b/numpy/core/arrayprint.py
@@ -3,8 +3,6 @@
$Id: arrayprint.py,v 1.9 2005/09/13 13:58:44 teoliphant Exp $
"""
-from __future__ import division, absolute_import, print_function
-
__all__ = ["array2string", "array_str", "array_repr", "set_string_function",
"set_printoptions", "get_printoptions", "printoptions",
"format_float_positional", "format_float_scientific"]
@@ -24,28 +22,21 @@ __docformat__ = 'restructuredtext'
# scalars are printed inside an ndarray. Only the latter strs are currently
# user-customizable.
-import sys
import functools
import numbers
-if sys.version_info[0] >= 3:
- try:
- from _thread import get_ident
- except ImportError:
- from _dummy_thread import get_ident
-else:
- try:
- from thread import get_ident
- except ImportError:
- from dummy_thread import get_ident
+try:
+ from _thread import get_ident
+except ImportError:
+ from _dummy_thread import get_ident
import numpy as np
from . import numerictypes as _nt
-from .umath import absolute, not_equal, isnan, isinf, isfinite, isnat
+from .umath import absolute, isinf, isfinite, isnat
from . import multiarray
from .multiarray import (array, dragon4_positional, dragon4_scientific,
datetime_as_string, datetime_data, ndarray,
set_legacy_print_mode)
-from .fromnumeric import ravel, any
+from .fromnumeric import any
from .numeric import concatenate, asarray, errstate
from .numerictypes import (longlong, intc, int_, float_, complex_, bool_,
flexible)
@@ -100,7 +91,7 @@ def _make_options_dict(precision=None, threshold=None, edgeitems=None,
@set_module('numpy')
def set_printoptions(precision=None, threshold=None, edgeitems=None,
linewidth=None, suppress=None, nanstr=None, infstr=None,
- formatter=None, sign=None, floatmode=None, **kwarg):
+ formatter=None, sign=None, floatmode=None, *, legacy=None):
"""
Set printing options.
@@ -111,7 +102,7 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None,
----------
precision : int or None, optional
Number of digits of precision for floating point output (default 8).
- May be `None` if `floatmode` is not `fixed`, to print as many digits as
+ May be None if `floatmode` is not `fixed`, to print as many digits as
necessary to uniquely specify the value.
threshold : int, optional
Total number of array elements which trigger summarization
@@ -249,11 +240,6 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None,
array([ 0. , 1.11, 2.22, ..., 7.78, 8.89, 10. ])
"""
- legacy = kwarg.pop('legacy', None)
- if kwarg:
- msg = "set_printoptions() got unexpected keyword argument '{}'"
- raise TypeError(msg.format(kwarg.popitem()[0]))
-
opt = _make_options_dict(precision, threshold, edgeitems, linewidth,
suppress, nanstr, infstr, sign, formatter,
floatmode, legacy)
@@ -369,23 +355,22 @@ def repr_format(x):
def str_format(x):
return str(x)
-def _get_formatdict(data, **opt):
- prec, fmode = opt['precision'], opt['floatmode']
- supp, sign = opt['suppress'], opt['sign']
- legacy = opt['legacy']
+def _get_formatdict(data, *, precision, floatmode, suppress, sign, legacy,
+ formatter, **kwargs):
+ # note: extra arguments in kwargs are ignored
# wrapped in lambdas to avoid taking a code path with the wrong type of data
formatdict = {
'bool': lambda: BoolFormat(data),
'int': lambda: IntegerFormat(data),
- 'float': lambda:
- FloatingFormat(data, prec, fmode, supp, sign, legacy=legacy),
- 'longfloat': lambda:
- FloatingFormat(data, prec, fmode, supp, sign, legacy=legacy),
- 'complexfloat': lambda:
- ComplexFloatingFormat(data, prec, fmode, supp, sign, legacy=legacy),
- 'longcomplexfloat': lambda:
- ComplexFloatingFormat(data, prec, fmode, supp, sign, legacy=legacy),
+ 'float': lambda: FloatingFormat(
+ data, precision, floatmode, suppress, sign, legacy=legacy),
+ 'longfloat': lambda: FloatingFormat(
+ data, precision, floatmode, suppress, sign, legacy=legacy),
+ 'complexfloat': lambda: ComplexFloatingFormat(
+ data, precision, floatmode, suppress, sign, legacy=legacy),
+ 'longcomplexfloat': lambda: ComplexFloatingFormat(
+ data, precision, floatmode, suppress, sign, legacy=legacy),
'datetime': lambda: DatetimeFormat(data, legacy=legacy),
'timedelta': lambda: TimedeltaFormat(data),
'object': lambda: _object_format,
@@ -398,7 +383,6 @@ def _get_formatdict(data, **opt):
def indirect(x):
return lambda: x
- formatter = opt['formatter']
if formatter is not None:
fkeys = [k for k in formatter.keys() if formatter[k] is not None]
if 'all' in fkeys:
@@ -525,7 +509,7 @@ def _array2string_dispatcher(
suppress_small=None, separator=None, prefix=None,
style=None, formatter=None, threshold=None,
edgeitems=None, sign=None, floatmode=None, suffix=None,
- **kwarg):
+ *, legacy=None):
return (a,)
@@ -534,7 +518,7 @@ def array2string(a, max_line_width=None, precision=None,
suppress_small=None, separator=' ', prefix="",
style=np._NoValue, formatter=None, threshold=None,
edgeitems=None, sign=None, floatmode=None, suffix="",
- **kwarg):
+ *, legacy=None):
"""
Return a string representation of an array.
@@ -679,10 +663,6 @@ def array2string(a, max_line_width=None, precision=None,
'[0x0 0x1 0x2]'
"""
- legacy = kwarg.pop('legacy', None)
- if kwarg:
- msg = "array2string() got unexpected keyword argument '{}'"
- raise TypeError(msg.format(kwarg.popitem()[0]))
overrides = _make_options_dict(precision, threshold, edgeitems,
max_line_width, suppress_small, None, None,
@@ -851,15 +831,15 @@ def _none_or_positive_arg(x, name):
raise ValueError("{} must be >= 0".format(name))
return x
-class FloatingFormat(object):
+class FloatingFormat:
""" Formatter for subtypes of np.floating """
def __init__(self, data, precision, floatmode, suppress_small, sign=False,
- **kwarg):
+ *, legacy=None):
# for backcompatibility, accept bools
if isinstance(sign, bool):
sign = '+' if sign else '-'
- self._legacy = kwarg.get('legacy', False)
+ self._legacy = legacy
if self._legacy == '1.13':
# when not 0d, legacy does not support '-'
if data.shape != () and sign == '-':
@@ -1140,7 +1120,7 @@ def format_float_positional(x, precision=None, unique=True,
pad_right=pad_right)
-class IntegerFormat(object):
+class IntegerFormat:
def __init__(self, data):
if data.size > 0:
max_str_len = max(len(str(np.max(data))),
@@ -1153,7 +1133,7 @@ class IntegerFormat(object):
return self.format % x
-class BoolFormat(object):
+class BoolFormat:
def __init__(self, data, **kwargs):
# add an extra space so " True" and "False" have the same length and
# array elements align nicely when printed, except in 0d arrays
@@ -1163,23 +1143,27 @@ class BoolFormat(object):
return self.truestr if x else "False"
-class ComplexFloatingFormat(object):
+class ComplexFloatingFormat:
""" Formatter for subtypes of np.complexfloating """
def __init__(self, x, precision, floatmode, suppress_small,
- sign=False, **kwarg):
+ sign=False, *, legacy=None):
# for backcompatibility, accept bools
if isinstance(sign, bool):
sign = '+' if sign else '-'
floatmode_real = floatmode_imag = floatmode
- if kwarg.get('legacy', False) == '1.13':
+ if legacy == '1.13':
floatmode_real = 'maxprec_equal'
floatmode_imag = 'maxprec'
- self.real_format = FloatingFormat(x.real, precision, floatmode_real,
- suppress_small, sign=sign, **kwarg)
- self.imag_format = FloatingFormat(x.imag, precision, floatmode_imag,
- suppress_small, sign='+', **kwarg)
+ self.real_format = FloatingFormat(
+ x.real, precision, floatmode_real, suppress_small,
+ sign=sign, legacy=legacy
+ )
+ self.imag_format = FloatingFormat(
+ x.imag, precision, floatmode_imag, suppress_small,
+ sign='+', legacy=legacy
+ )
def __call__(self, x):
r = self.real_format(x.real)
@@ -1192,7 +1176,7 @@ class ComplexFloatingFormat(object):
return r + i
-class _TimelikeFormat(object):
+class _TimelikeFormat:
def __init__(self, data):
non_nat = data[~isnat(data)]
if len(non_nat) > 0:
@@ -1255,7 +1239,7 @@ class TimedeltaFormat(_TimelikeFormat):
return str(x.astype('i8'))
-class SubArrayFormat(object):
+class SubArrayFormat:
def __init__(self, format_function):
self.format_function = format_function
@@ -1265,7 +1249,7 @@ class SubArrayFormat(object):
return "[" + ", ".join(self.__call__(a) for a in arr) + "]"
-class StructuredVoidFormat(object):
+class StructuredVoidFormat:
"""
Formatter for structured np.void objects.
diff --git a/numpy/core/code_generators/__init__.py b/numpy/core/code_generators/__init__.py
index 1d0f69b67..e69de29bb 100644
--- a/numpy/core/code_generators/__init__.py
+++ b/numpy/core/code_generators/__init__.py
@@ -1 +0,0 @@
-from __future__ import division, absolute_import, print_function
diff --git a/numpy/core/code_generators/cversions.txt b/numpy/core/code_generators/cversions.txt
index 00f10df57..5daa52d79 100644
--- a/numpy/core/code_generators/cversions.txt
+++ b/numpy/core/code_generators/cversions.txt
@@ -47,4 +47,7 @@
# Deprecate PyArray_SetNumericOps and PyArray_GetNumericOps,
# Add fields core_dim_flags and core_dim_sizes to PyUFuncObject.
# Add PyUFunc_FromFuncAndDataAndSignatureAndIdentity to ufunc_funcs_api.
+# Version 13 (NumPy 1.17) No change.
+# Version 13 (NumPy 1.18) No change.
+# Version 13 (NumPy 1.19) No change.
0x0000000d = 5b0e8bbded00b166125974fc71e80a33
diff --git a/numpy/core/code_generators/genapi.py b/numpy/core/code_generators/genapi.py
index 7336e5e13..88dc2d90a 100644
--- a/numpy/core/code_generators/genapi.py
+++ b/numpy/core/code_generators/genapi.py
@@ -6,11 +6,13 @@ See ``find_function`` for how functions should be formatted, and
specified.
"""
-from __future__ import division, absolute_import, print_function
+from numpy.distutils.conv_template import process_file as process_c_file
-import sys, os, re
import hashlib
-
+import io
+import os
+import re
+import sys
import textwrap
from os.path import join
@@ -73,7 +75,7 @@ def _repl(str):
return str.replace('Bool', 'npy_bool')
-class StealRef(object):
+class StealRef:
def __init__(self, arg):
self.arg = arg # counting from 1
@@ -84,7 +86,7 @@ class StealRef(object):
return 'NPY_STEALS_REF_TO_ARG(%d)' % self.arg
-class NonNull(object):
+class NonNull:
def __init__(self, arg):
self.arg = arg # counting from 1
@@ -95,7 +97,7 @@ class NonNull(object):
return 'NPY_GCC_NONNULL(%d)' % self.arg
-class Function(object):
+class Function:
def __init__(self, name, return_type, args, doc=''):
self.name = name
self.return_type = _repl(return_type)
@@ -215,7 +217,10 @@ def find_functions(filename, tag='API'):
This function does foo...
*/
"""
- fo = open(filename, 'r')
+ if filename.endswith(('.c.src', '.h.src')):
+ fo = io.StringIO(process_c_file(filename))
+ else:
+ fo = open(filename, 'r')
functions = []
return_type = None
function_name = None
@@ -303,7 +308,7 @@ def write_file(filename, data):
# Those *Api classes instances know how to output strings for the generated code
-class TypeApi(object):
+class TypeApi:
def __init__(self, name, index, ptr_cast, api_name):
self.index = index
self.name = name
@@ -325,7 +330,7 @@ extern NPY_NO_EXPORT PyTypeObject %(type)s;
""" % {'type': self.name}
return astr
-class GlobalVarApi(object):
+class GlobalVarApi:
def __init__(self, name, index, type, api_name):
self.name = name
self.index = index
@@ -349,7 +354,7 @@ extern NPY_NO_EXPORT %(type)s %(name)s;
# Dummy to be able to consistently use *Api instances for all items in the
# array api
-class BoolValuesApi(object):
+class BoolValuesApi:
def __init__(self, name, index, api_name):
self.name = name
self.index = index
@@ -371,7 +376,7 @@ extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2];
"""
return astr
-class FunctionApi(object):
+class FunctionApi:
def __init__(self, name, index, annotations, return_type, args, api_name):
self.name = name
self.index = index
diff --git a/numpy/core/code_generators/generate_numpy_api.py b/numpy/core/code_generators/generate_numpy_api.py
index 5e04fb86d..fe21bc543 100644
--- a/numpy/core/code_generators/generate_numpy_api.py
+++ b/numpy/core/code_generators/generate_numpy_api.py
@@ -1,5 +1,3 @@
-from __future__ import division, print_function
-
import os
import genapi
@@ -59,21 +57,12 @@ _import_array(void)
return -1;
}
-#if PY_VERSION_HEX >= 0x03000000
if (!PyCapsule_CheckExact(c_api)) {
PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCapsule object");
Py_DECREF(c_api);
return -1;
}
PyArray_API = (void **)PyCapsule_GetPointer(c_api, NULL);
-#else
- if (!PyCObject_Check(c_api)) {
- PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCObject object");
- Py_DECREF(c_api);
- return -1;
- }
- PyArray_API = (void **)PyCObject_AsVoidPtr(c_api);
-#endif
Py_DECREF(c_api);
if (PyArray_API == NULL) {
PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is NULL pointer");
@@ -120,13 +109,7 @@ _import_array(void)
return 0;
}
-#if PY_VERSION_HEX >= 0x03000000
-#define NUMPY_IMPORT_ARRAY_RETVAL NULL
-#else
-#define NUMPY_IMPORT_ARRAY_RETVAL
-#endif
-
-#define import_array() {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return NUMPY_IMPORT_ARRAY_RETVAL; } }
+#define import_array() {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return NULL; } }
#define import_array1(ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return ret; } }
diff --git a/numpy/core/code_generators/generate_ufunc_api.py b/numpy/core/code_generators/generate_ufunc_api.py
index 1b0143e88..04c023675 100644
--- a/numpy/core/code_generators/generate_ufunc_api.py
+++ b/numpy/core/code_generators/generate_ufunc_api.py
@@ -1,12 +1,9 @@
-from __future__ import division, print_function
-
import os
import genapi
import numpy_api
-from genapi import \
- TypeApi, GlobalVarApi, FunctionApi, BoolValuesApi
+from genapi import TypeApi, FunctionApi
h_template = r"""
#ifdef _UMATHMODULE
@@ -51,21 +48,12 @@ _import_umath(void)
return -1;
}
-#if PY_VERSION_HEX >= 0x03000000
if (!PyCapsule_CheckExact(c_api)) {
PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is not PyCapsule object");
Py_DECREF(c_api);
return -1;
}
PyUFunc_API = (void **)PyCapsule_GetPointer(c_api, NULL);
-#else
- if (!PyCObject_Check(c_api)) {
- PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is not PyCObject object");
- Py_DECREF(c_api);
- return -1;
- }
- PyUFunc_API = (void **)PyCObject_AsVoidPtr(c_api);
-#endif
Py_DECREF(c_api);
if (PyUFunc_API == NULL) {
PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is NULL pointer");
@@ -74,12 +62,6 @@ _import_umath(void)
return 0;
}
-#if PY_VERSION_HEX >= 0x03000000
-#define NUMPY_IMPORT_UMATH_RETVAL NULL
-#else
-#define NUMPY_IMPORT_UMATH_RETVAL
-#endif
-
#define import_umath() \
do {\
UFUNC_NOFPE\
@@ -87,7 +69,7 @@ _import_umath(void)
PyErr_Print();\
PyErr_SetString(PyExc_ImportError,\
"numpy.core.umath failed to import");\
- return NUMPY_IMPORT_UMATH_RETVAL;\
+ return NULL;\
}\
} while(0)
diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py
index 0d3bbffe9..c14711d16 100644
--- a/numpy/core/code_generators/generate_umath.py
+++ b/numpy/core/code_generators/generate_umath.py
@@ -1,5 +1,3 @@
-from __future__ import division, print_function
-
import os
import re
import struct
@@ -21,16 +19,16 @@ ReorderableNone = "(Py_INCREF(Py_None), Py_None)"
# Sentinel value to specify using the full type description in the
# function name
-class FullTypeDescr(object):
+class FullTypeDescr:
pass
-class FuncNameSuffix(object):
+class FuncNameSuffix:
"""Stores the suffix to append when generating functions names.
"""
def __init__(self, suffix):
self.suffix = suffix
-class TypeDescription(object):
+class TypeDescription:
"""Type signature for a ufunc.
Attributes
@@ -120,7 +118,7 @@ def TD(types, f=None, astype=None, in_=None, out=None, simd=None):
tds.append(TypeDescription(t, f=fd, in_=i, out=o, astype=astype, simd=simdt))
return tds
-class Ufunc(object):
+class Ufunc:
"""Description of a ufunc.
Attributes
@@ -132,7 +130,7 @@ class Ufunc(object):
type_descriptions : list of TypeDescription objects
"""
def __init__(self, nin, nout, identity, docstring, typereso,
- *type_descriptions, **kwargs):
+ *type_descriptions, signature=None):
self.nin = nin
self.nout = nout
if identity is None:
@@ -141,23 +139,17 @@ class Ufunc(object):
self.docstring = docstring
self.typereso = typereso
self.type_descriptions = []
- self.signature = kwargs.pop('signature', None)
+ self.signature = signature
for td in type_descriptions:
self.type_descriptions.extend(td)
for td in self.type_descriptions:
td.finish_signature(self.nin, self.nout)
- if kwargs:
- raise ValueError('unknown kwargs %r' % str(kwargs))
# String-handling utilities to avoid locale-dependence.
import string
-if sys.version_info[0] < 3:
- UPPER_TABLE = string.maketrans(string.ascii_lowercase,
- string.ascii_uppercase)
-else:
- UPPER_TABLE = bytes.maketrans(bytes(string.ascii_lowercase, "ascii"),
- bytes(string.ascii_uppercase, "ascii"))
+UPPER_TABLE = bytes.maketrans(bytes(string.ascii_lowercase, "ascii"),
+ bytes(string.ascii_uppercase, "ascii"))
def english_upper(s):
""" Apply English case rules to convert ASCII strings to all upper case.
@@ -226,7 +218,9 @@ chartoname = {
'P': 'OBJECT',
}
-all = '?bBhHiIlLqQefdgFDGOMm'
+noobj = '?bBhHiIlLqQefdgFDGmM'
+all = '?bBhHiIlLqQefdgFDGOmM'
+
O = 'O'
P = 'P'
ints = 'bBhHiIlLqQ'
@@ -239,6 +233,7 @@ flts = 'efdg'
fltsO = flts + O
fltsP = flts + P
cmplx = 'FDG'
+cmplxvec = 'FD'
cmplxO = cmplx + O
cmplxP = cmplx + P
inexact = flts + cmplx
@@ -246,10 +241,8 @@ inexactvec = 'fd'
noint = inexact+O
nointP = inexact+P
allP = bints+times+flts+cmplxP
-nobool = all[1:]
-noobj = all[:-3]+all[-2:]
-nobool_or_obj = all[1:-3]+all[-2:]
-nobool_or_datetime = all[1:-2]+all[-1:]
+nobool_or_obj = noobj[1:]
+nobool_or_datetime = noobj[1:-1] + O # includes m - timedelta64
intflt = ints+flts
intfltcmplx = ints+flts+cmplx
nocmplx = bints+times+flts
@@ -276,7 +269,7 @@ defdict = {
Ufunc(2, 1, Zero,
docstrings.get('numpy.core.umath.add'),
'PyUFunc_AdditionTypeResolver',
- TD(notimes_or_obj, simd=[('avx2', ints)]),
+ TD(notimes_or_obj, simd=[('avx512f', cmplxvec),('avx2', ints)]),
[TypeDescription('M', FullTypeDescr, 'Mm', 'M'),
TypeDescription('m', FullTypeDescr, 'mm', 'm'),
TypeDescription('M', FullTypeDescr, 'mM', 'M'),
@@ -287,7 +280,7 @@ defdict = {
Ufunc(2, 1, None, # Zero is only a unit to the right, not the left
docstrings.get('numpy.core.umath.subtract'),
'PyUFunc_SubtractionTypeResolver',
- TD(notimes_or_obj, simd=[('avx2', ints)]),
+ TD(ints + inexact, simd=[('avx512f', cmplxvec),('avx2', ints)]),
[TypeDescription('M', FullTypeDescr, 'Mm', 'M'),
TypeDescription('m', FullTypeDescr, 'mm', 'm'),
TypeDescription('M', FullTypeDescr, 'MM', 'm'),
@@ -298,7 +291,7 @@ defdict = {
Ufunc(2, 1, One,
docstrings.get('numpy.core.umath.multiply'),
'PyUFunc_MultiplicationTypeResolver',
- TD(notimes_or_obj, simd=[('avx2', ints)]),
+ TD(notimes_or_obj, simd=[('avx512f', cmplxvec),('avx2', ints)]),
[TypeDescription('m', FullTypeDescr, 'mq', 'm'),
TypeDescription('m', FullTypeDescr, 'qm', 'm'),
TypeDescription('m', FullTypeDescr, 'md', 'm'),
@@ -306,17 +299,7 @@ defdict = {
],
TD(O, f='PyNumber_Multiply'),
),
-'divide':
- Ufunc(2, 1, None, # One is only a unit to the right, not the left
- docstrings.get('numpy.core.umath.divide'),
- 'PyUFunc_MixedDivisionTypeResolver',
- TD(intfltcmplx),
- [TypeDescription('m', FullTypeDescr, 'mq', 'm'),
- TypeDescription('m', FullTypeDescr, 'md', 'm'),
- TypeDescription('m', FullTypeDescr, 'mm', 'd'),
- ],
- TD(O, f='PyNumber_Divide'),
- ),
+#'divide' : aliased to true_divide in umathmodule.c:initumath
'floor_divide':
Ufunc(2, 1, None, # One is only a unit to the right, not the left
docstrings.get('numpy.core.umath.floor_divide'),
@@ -343,7 +326,7 @@ defdict = {
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.conjugate'),
None,
- TD(ints+flts+cmplx, simd=[('avx2', ints)]),
+ TD(ints+flts+cmplx, simd=[('avx2', ints), ('avx512f', cmplxvec)]),
TD(P, f='conjugate'),
),
'fmod':
@@ -358,7 +341,7 @@ defdict = {
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.square'),
None,
- TD(ints+inexact, simd=[('avx2', ints), ('fma', 'fd'), ('avx512f', 'fd')]),
+ TD(ints+inexact, simd=[('avx2', ints), ('fma', 'fd'), ('avx512f', 'FDfd')]),
TD(O, f='Py_square'),
),
'reciprocal':
@@ -396,7 +379,7 @@ defdict = {
docstrings.get('numpy.core.umath.absolute'),
'PyUFunc_AbsoluteTypeResolver',
TD(bints+flts+timedeltaonly, simd=[('fma', 'fd'), ('avx512f', 'fd')]),
- TD(cmplx, out=('f', 'd', 'g')),
+ TD(cmplx, simd=[('avx512f', cmplxvec)], out=('f', 'd', 'g')),
TD(O, f='PyNumber_Absolute'),
),
'_arg':
@@ -409,7 +392,7 @@ defdict = {
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.negative'),
'PyUFunc_NegativeTypeResolver',
- TD(bints+flts+timedeltaonly, simd=[('avx2', ints)]),
+ TD(ints+flts+timedeltaonly, simd=[('avx2', ints)]),
TD(cmplx, f='neg'),
TD(O, f='PyNumber_Negative'),
),
@@ -433,6 +416,7 @@ defdict = {
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(all, out='?', simd=[('avx2', ints)]),
[TypeDescription('O', FullTypeDescr, 'OO', 'O')],
+ TD('O', out='?'),
),
'greater_equal':
Ufunc(2, 1, None,
@@ -440,6 +424,7 @@ defdict = {
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(all, out='?', simd=[('avx2', ints)]),
[TypeDescription('O', FullTypeDescr, 'OO', 'O')],
+ TD('O', out='?'),
),
'less':
Ufunc(2, 1, None,
@@ -447,6 +432,7 @@ defdict = {
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(all, out='?', simd=[('avx2', ints)]),
[TypeDescription('O', FullTypeDescr, 'OO', 'O')],
+ TD('O', out='?'),
),
'less_equal':
Ufunc(2, 1, None,
@@ -454,6 +440,7 @@ defdict = {
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(all, out='?', simd=[('avx2', ints)]),
[TypeDescription('O', FullTypeDescr, 'OO', 'O')],
+ TD('O', out='?'),
),
'equal':
Ufunc(2, 1, None,
@@ -461,6 +448,7 @@ defdict = {
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(all, out='?', simd=[('avx2', ints)]),
[TypeDescription('O', FullTypeDescr, 'OO', 'O')],
+ TD('O', out='?'),
),
'not_equal':
Ufunc(2, 1, None,
@@ -468,6 +456,7 @@ defdict = {
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(all, out='?', simd=[('avx2', ints)]),
[TypeDescription('O', FullTypeDescr, 'OO', 'O')],
+ TD('O', out='?'),
),
'logical_and':
Ufunc(2, 1, True_,
@@ -475,6 +464,7 @@ defdict = {
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(nodatetime_or_obj, out='?', simd=[('avx2', ints)]),
TD(O, f='npy_ObjectLogicalAnd'),
+ TD(O, f='npy_ObjectLogicalAnd', out='?'),
),
'logical_not':
Ufunc(1, 1, None,
@@ -482,6 +472,7 @@ defdict = {
None,
TD(nodatetime_or_obj, out='?', simd=[('avx2', ints)]),
TD(O, f='npy_ObjectLogicalNot'),
+ TD(O, f='npy_ObjectLogicalNot', out='?'),
),
'logical_or':
Ufunc(2, 1, False_,
@@ -489,6 +480,7 @@ defdict = {
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(nodatetime_or_obj, out='?', simd=[('avx2', ints)]),
TD(O, f='npy_ObjectLogicalOr'),
+ TD(O, f='npy_ObjectLogicalOr', out='?'),
),
'logical_xor':
Ufunc(2, 1, False_,
@@ -501,14 +493,14 @@ defdict = {
Ufunc(2, 1, ReorderableNone,
docstrings.get('numpy.core.umath.maximum'),
'PyUFunc_SimpleUniformOperationTypeResolver',
- TD(noobj),
+ TD(noobj, simd=[('avx512f', 'fd')]),
TD(O, f='npy_ObjectMax')
),
'minimum':
Ufunc(2, 1, ReorderableNone,
docstrings.get('numpy.core.umath.minimum'),
'PyUFunc_SimpleUniformOperationTypeResolver',
- TD(noobj),
+ TD(noobj, simd=[('avx512f', 'fd')]),
TD(O, f='npy_ObjectMin')
),
'clip':
@@ -849,8 +841,8 @@ defdict = {
'isnan':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.isnan'),
- None,
- TD(nodatetime_or_obj, out='?'),
+ 'PyUFunc_IsFiniteTypeResolver',
+ TD(noobj, out='?'),
),
'isnat':
Ufunc(1, 1, None,
@@ -861,8 +853,8 @@ defdict = {
'isinf':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.isinf'),
- None,
- TD(nodatetime_or_obj, out='?'),
+ 'PyUFunc_IsFiniteTypeResolver',
+ TD(noobj, out='?'),
),
'isfinite':
Ufunc(1, 1, None,
@@ -948,10 +940,6 @@ defdict = {
),
}
-if sys.version_info[0] >= 3:
- # Will be aliased to true_divide in umathmodule.c.src:InitOtherOperators
- del defdict['divide']
-
def indent(st, spaces):
indentation = ' '*spaces
indented = indentation + st.replace('\n', '\n'+indentation)
@@ -1027,7 +1015,7 @@ def make_arrays(funcdict):
for vt in t.simd:
code2list.append(textwrap.dedent("""\
#ifdef HAVE_ATTRIBUTE_TARGET_{ISA}
- if (npy_cpu_supports("{isa}")) {{
+ if (NPY_CPU_HAVE({ISA})) {{
{fname}_functions[{idx}] = {type}_{fname}_{isa};
}}
#endif
@@ -1086,15 +1074,9 @@ def make_ufuncs(funcdict):
uf = funcdict[name]
mlist = []
docstring = textwrap.dedent(uf.docstring).strip()
- if sys.version_info[0] < 3:
- docstring = docstring.encode('string-escape')
- docstring = docstring.replace(r'"', r'\"')
- else:
- docstring = docstring.encode('unicode-escape').decode('ascii')
- docstring = docstring.replace(r'"', r'\"')
- # XXX: I don't understand why the following replace is not
- # necessary in the python 2 case.
- docstring = docstring.replace(r"'", r"\'")
+ docstring = docstring.encode('unicode-escape').decode('ascii')
+ docstring = docstring.replace(r'"', r'\"')
+ docstring = docstring.replace(r"'", r"\'")
# Split the docstring because some compilers (like MS) do not like big
# string literal in C code. We split at endlines because textwrap.wrap
# do not play well with \n
@@ -1157,7 +1139,6 @@ def make_code(funcdict, filename):
Please make changes to the code generator program (%s)
**/
- #include "cpuid.h"
#include "ufunc_object.h"
#include "ufunc_type_resolution.h"
#include "loops.h"
diff --git a/numpy/core/code_generators/numpy_api.py b/numpy/core/code_generators/numpy_api.py
index a71c236fd..916fb537e 100644
--- a/numpy/core/code_generators/numpy_api.py
+++ b/numpy/core/code_generators/numpy_api.py
@@ -13,8 +13,6 @@ When adding a function, make sure to use the next integer not used as an index
exception, so it should hopefully not get unnoticed).
"""
-from __future__ import division, absolute_import, print_function
-
from code_generators.genapi import StealRef, NonNull
# index, type
diff --git a/numpy/core/code_generators/ufunc_docstrings.py b/numpy/core/code_generators/ufunc_docstrings.py
index 1ac477b54..129516658 100644
--- a/numpy/core/code_generators/ufunc_docstrings.py
+++ b/numpy/core/code_generators/ufunc_docstrings.py
@@ -9,7 +9,6 @@ for the ufuncs in numpy.core at the C level when the ufuncs are created
at compile time.
"""
-from __future__ import division, absolute_import, print_function
import textwrap
docdict = {}
@@ -22,7 +21,7 @@ subst = {
'PARAMS': textwrap.dedent("""
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
- a shape that the inputs broadcast to. If not provided or `None`,
+ a shape that the inputs broadcast to. If not provided or None,
a freshly-allocated array is returned. A tuple (possible only as a
keyword argument) must have length equal to the number of outputs.
where : array_like, optional
@@ -37,8 +36,8 @@ subst = {
:ref:`ufunc docs <ufuncs.kwargs>`.
""").strip(),
'BROADCASTABLE_2': ("If ``x1.shape != x2.shape``, they must be "
- "broadcastable to a common shape (which becomes the "
- "shape of the output)."),
+ "broadcastable to a common\n shape (which becomes "
+ "the shape of the output)."),
'OUT_SCALAR_1': "This is a scalar if `x` is a scalar.",
'OUT_SCALAR_2': "This is a scalar if both `x1` and `x2` are scalars.",
}
@@ -117,7 +116,8 @@ add_newdoc('numpy.core.umath', 'add',
Parameters
----------
x1, x2 : array_like
- The arrays to be added. $BROADCASTABLE_2
+ The arrays to be added.
+ $BROADCASTABLE_2
$PARAMS
Returns
@@ -443,7 +443,8 @@ add_newdoc('numpy.core.umath', 'arctan2',
x1 : array_like, real-valued
`y`-coordinates.
x2 : array_like, real-valued
- `x`-coordinates. $BROADCASTABLE_2
+ `x`-coordinates.
+ $BROADCASTABLE_2
$PARAMS
Returns
@@ -566,7 +567,8 @@ add_newdoc('numpy.core.umath', 'bitwise_and',
Parameters
----------
x1, x2 : array_like
- Only integer and boolean types are handled. $BROADCASTABLE_2
+ Only integer and boolean types are handled.
+ $BROADCASTABLE_2
$PARAMS
Returns
@@ -619,7 +621,8 @@ add_newdoc('numpy.core.umath', 'bitwise_or',
Parameters
----------
x1, x2 : array_like
- Only integer and boolean types are handled. $BROADCASTABLE_2
+ Only integer and boolean types are handled.
+ $BROADCASTABLE_2
$PARAMS
Returns
@@ -677,7 +680,8 @@ add_newdoc('numpy.core.umath', 'bitwise_xor',
Parameters
----------
x1, x2 : array_like
- Only integer and boolean types are handled. $BROADCASTABLE_2
+ Only integer and boolean types are handled.
+ $BROADCASTABLE_2
$PARAMS
Returns
@@ -987,7 +991,8 @@ add_newdoc('numpy.core.umath', 'heaviside',
x1 : array_like
Input values.
x2 : array_like
- The value of the function when x1 is 0. $BROADCASTABLE_2
+ The value of the function when x1 is 0.
+ $BROADCASTABLE_2
$PARAMS
Returns
@@ -1022,7 +1027,8 @@ add_newdoc('numpy.core.umath', 'divide',
x1 : array_like
Dividend array.
x2 : array_like
- Divisor array. $BROADCASTABLE_2
+ Divisor array.
+ $BROADCASTABLE_2
$PARAMS
Returns
@@ -1091,7 +1097,8 @@ add_newdoc('numpy.core.umath', 'equal',
Parameters
----------
x1, x2 : array_like
- Input arrays. $BROADCASTABLE_2
+ Input arrays.
+ $BROADCASTABLE_2
$PARAMS
Returns
@@ -1223,7 +1230,7 @@ add_newdoc('numpy.core.umath', 'expm1',
Parameters
----------
x : array_like
- Input values.
+ Input values.
$PARAMS
Returns
@@ -1338,7 +1345,8 @@ add_newdoc('numpy.core.umath', 'floor_divide',
x1 : array_like
Numerator.
x2 : array_like
- Denominator. $BROADCASTABLE_2
+ Denominator.
+ $BROADCASTABLE_2
$PARAMS
Returns
@@ -1378,7 +1386,8 @@ add_newdoc('numpy.core.umath', 'fmod',
x1 : array_like
Dividend.
x2 : array_like
- Divisor. $BROADCASTABLE_2
+ Divisor.
+ $BROADCASTABLE_2
$PARAMS
Returns
@@ -1428,7 +1437,8 @@ add_newdoc('numpy.core.umath', 'greater',
Parameters
----------
x1, x2 : array_like
- Input arrays. $BROADCASTABLE_2
+ Input arrays.
+ $BROADCASTABLE_2
$PARAMS
Returns
@@ -1464,7 +1474,8 @@ add_newdoc('numpy.core.umath', 'greater_equal',
Parameters
----------
x1, x2 : array_like
- Input arrays. $BROADCASTABLE_2
+ Input arrays.
+ $BROADCASTABLE_2
$PARAMS
Returns
@@ -1497,7 +1508,8 @@ add_newdoc('numpy.core.umath', 'hypot',
Parameters
----------
x1, x2 : array_like
- Leg of the triangle(s). $BROADCASTABLE_2
+ Leg of the triangle(s).
+ $BROADCASTABLE_2
$PARAMS
Returns
@@ -1832,7 +1844,8 @@ add_newdoc('numpy.core.umath', 'less',
Parameters
----------
x1, x2 : array_like
- Input arrays. $BROADCASTABLE_2
+ Input arrays.
+ $BROADCASTABLE_2
$PARAMS
Returns
@@ -1860,7 +1873,8 @@ add_newdoc('numpy.core.umath', 'less_equal',
Parameters
----------
x1, x2 : array_like
- Input arrays. $BROADCASTABLE_2
+ Input arrays.
+ $BROADCASTABLE_2
$PARAMS
Returns
@@ -2044,7 +2058,8 @@ add_newdoc('numpy.core.umath', 'logaddexp',
Parameters
----------
x1, x2 : array_like
- Input values. $BROADCASTABLE_2
+ Input values.
+ $BROADCASTABLE_2
$PARAMS
Returns
@@ -2086,7 +2101,8 @@ add_newdoc('numpy.core.umath', 'logaddexp2',
Parameters
----------
x1, x2 : array_like
- Input values. $BROADCASTABLE_2
+ Input values.
+ $BROADCASTABLE_2
$PARAMS
Returns
@@ -2177,7 +2193,8 @@ add_newdoc('numpy.core.umath', 'logical_and',
Parameters
----------
x1, x2 : array_like
- Input arrays. $BROADCASTABLE_2
+ Input arrays.
+ $BROADCASTABLE_2
$PARAMS
Returns
@@ -2282,7 +2299,8 @@ add_newdoc('numpy.core.umath', 'logical_xor',
Parameters
----------
x1, x2 : array_like
- Logical XOR is applied to the elements of `x1` and `x2`. $BROADCASTABLE_2
+ Logical XOR is applied to the elements of `x1` and `x2`.
+ $BROADCASTABLE_2
$PARAMS
Returns
@@ -2329,7 +2347,8 @@ add_newdoc('numpy.core.umath', 'maximum',
Parameters
----------
x1, x2 : array_like
- The arrays holding the elements to be compared. $BROADCASTABLE_2
+ The arrays holding the elements to be compared.
+ $BROADCASTABLE_2
$PARAMS
Returns
@@ -2387,7 +2406,8 @@ add_newdoc('numpy.core.umath', 'minimum',
Parameters
----------
x1, x2 : array_like
- The arrays holding the elements to be compared. $BROADCASTABLE_2
+ The arrays holding the elements to be compared.
+ $BROADCASTABLE_2
$PARAMS
Returns
@@ -2445,7 +2465,8 @@ add_newdoc('numpy.core.umath', 'fmax',
Parameters
----------
x1, x2 : array_like
- The arrays holding the elements to be compared. $BROADCASTABLE_2
+ The arrays holding the elements to be compared.
+ $BROADCASTABLE_2
$PARAMS
Returns
@@ -2502,7 +2523,8 @@ add_newdoc('numpy.core.umath', 'fmin',
Parameters
----------
x1, x2 : array_like
- The arrays holding the elements to be compared. $BROADCASTABLE_2
+ The arrays holding the elements to be compared.
+ $BROADCASTABLE_2
$PARAMS
Returns
@@ -2596,7 +2618,7 @@ add_newdoc('numpy.core.umath', 'matmul',
out : ndarray, optional
A location into which the result is stored. If provided, it must have
a shape that matches the signature `(n,k),(k,m)->(n,m)`. If not
- provided or `None`, a freshly-allocated array is returned.
+ provided or None, a freshly-allocated array is returned.
**kwargs
For other keyword-only arguments, see the
:ref:`ufunc docs <ufuncs.kwargs>`.
@@ -2755,7 +2777,8 @@ add_newdoc('numpy.core.umath', 'multiply',
Parameters
----------
x1, x2 : array_like
- Input arrays to be multiplied. $BROADCASTABLE_2
+ Input arrays to be multiplied.
+ $BROADCASTABLE_2
$PARAMS
Returns
@@ -2836,7 +2859,8 @@ add_newdoc('numpy.core.umath', 'not_equal',
Parameters
----------
x1, x2 : array_like
- Input arrays. $BROADCASTABLE_2
+ Input arrays.
+ $BROADCASTABLE_2
$PARAMS
Returns
@@ -2885,7 +2909,8 @@ add_newdoc('numpy.core.umath', 'power',
x1 : array_like
The bases.
x2 : array_like
- The exponents. $BROADCASTABLE_2
+ The exponents.
+ $BROADCASTABLE_2
$PARAMS
Returns
@@ -2944,7 +2969,8 @@ add_newdoc('numpy.core.umath', 'float_power',
x1 : array_like
The bases.
x2 : array_like
- The exponents. $BROADCASTABLE_2
+ The exponents.
+ $BROADCASTABLE_2
$PARAMS
Returns
@@ -3116,7 +3142,8 @@ add_newdoc('numpy.core.umath', 'remainder',
x1 : array_like
Dividend array.
x2 : array_like
- Divisor array. $BROADCASTABLE_2
+ Divisor array.
+ $BROADCASTABLE_2
$PARAMS
Returns
@@ -3162,7 +3189,8 @@ add_newdoc('numpy.core.umath', 'divmod',
x1 : array_like
Dividend array.
x2 : array_like
- Divisor array. $BROADCASTABLE_2
+ Divisor array.
+ $BROADCASTABLE_2
$PARAMS
Returns
@@ -3201,7 +3229,8 @@ add_newdoc('numpy.core.umath', 'right_shift',
x1 : array_like, int
Input values.
x2 : array_like, int
- Number of bits to remove at the right of `x1`. $BROADCASTABLE_2
+ Number of bits to remove at the right of `x1`.
+ $BROADCASTABLE_2
$PARAMS
Returns
@@ -3335,7 +3364,8 @@ add_newdoc('numpy.core.umath', 'copysign',
x1 : array_like
Values to change the sign of.
x2 : array_like
- The sign of `x2` is copied to `x1`. $BROADCASTABLE_2
+ The sign of `x2` is copied to `x1`.
+ $BROADCASTABLE_2
$PARAMS
Returns
@@ -3642,7 +3672,8 @@ add_newdoc('numpy.core.umath', 'subtract',
Parameters
----------
x1, x2 : array_like
- The arrays to be subtracted from each other. $BROADCASTABLE_2
+ The arrays to be subtracted from each other.
+ $BROADCASTABLE_2
$PARAMS
Returns
@@ -3783,7 +3814,8 @@ add_newdoc('numpy.core.umath', 'true_divide',
x1 : array_like
Dividend array.
x2 : array_like
- Divisor array. $BROADCASTABLE_2
+ Divisor array.
+ $BROADCASTABLE_2
$PARAMS
Returns
@@ -3793,12 +3825,7 @@ add_newdoc('numpy.core.umath', 'true_divide',
Notes
-----
- The floor division operator ``//`` was added in Python 2.2 making
- ``//`` and ``/`` equivalent operators. The default floor division
- operation of ``/`` can be replaced by true division with ``from
- __future__ import division``.
-
- In Python 3.0, ``//`` is the floor division operator and ``/`` the
+ In Python, ``//`` is the floor division operator and ``/`` the
true division operator. The ``true_divide(x1, x2)`` function is
equivalent to true division in Python.
@@ -3808,15 +3835,11 @@ add_newdoc('numpy.core.umath', 'true_divide',
>>> np.true_divide(x, 4)
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
- >>> x//4
- array([0, 0, 0, 0, 1])
-
- >>> from __future__ import division
>>> x/4
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
+
>>> x//4
array([0, 0, 0, 0, 1])
-
""")
add_newdoc('numpy.core.umath', 'frexp',
@@ -3880,7 +3903,8 @@ add_newdoc('numpy.core.umath', 'ldexp',
x1 : array_like
Array of multipliers.
x2 : array_like, int
- Array of twos exponents. $BROADCASTABLE_2
+ Array of twos exponents.
+ $BROADCASTABLE_2
$PARAMS
Returns
@@ -3918,7 +3942,8 @@ add_newdoc('numpy.core.umath', 'gcd',
Parameters
----------
x1, x2 : array_like, int
- Arrays of values. $BROADCASTABLE_2
+ Arrays of values.
+ $BROADCASTABLE_2
Returns
-------
@@ -3948,7 +3973,8 @@ add_newdoc('numpy.core.umath', 'lcm',
Parameters
----------
x1, x2 : array_like, int
- Arrays of values. $BROADCASTABLE_2
+ Arrays of values.
+ $BROADCASTABLE_2
Returns
-------
diff --git a/numpy/core/cversions.py b/numpy/core/cversions.py
index 7995dd993..00159c3a8 100644
--- a/numpy/core/cversions.py
+++ b/numpy/core/cversions.py
@@ -3,8 +3,6 @@
The API has is defined by numpy_api_order and ufunc_api_order.
"""
-from __future__ import division, absolute_import, print_function
-
from os.path import dirname
from code_generators.genapi import fullapi_hash
diff --git a/numpy/core/defchararray.py b/numpy/core/defchararray.py
index a941c5b81..cd01c0e77 100644
--- a/numpy/core/defchararray.py
+++ b/numpy/core/defchararray.py
@@ -15,17 +15,16 @@ available in your version of Python.
The preferred alias for `defchararray` is `numpy.char`.
"""
-from __future__ import division, absolute_import, print_function
-
import functools
import sys
-from .numerictypes import string_, unicode_, integer, object_, bool_, character
+from .numerictypes import (
+ string_, unicode_, integer, int_, object_, bool_, character)
from .numeric import ndarray, compare_chararrays
from .numeric import array as narray
from numpy.core.multiarray import _vec_string
from numpy.core.overrides import set_module
from numpy.core import overrides
-from numpy.compat import asbytes, long
+from numpy.compat import asbytes
import numpy
__all__ = [
@@ -42,13 +41,6 @@ __all__ = [
_globalvar = 0
-if sys.version_info[0] >= 3:
- _unicode = str
- _bytes = bytes
-else:
- _unicode = unicode
- _bytes = str
-_len = len
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy.char')
@@ -63,7 +55,7 @@ def _use_unicode(*args):
result should be unicode.
"""
for x in args:
- if (isinstance(x, _unicode) or
+ if (isinstance(x, str) or
issubclass(numpy.asarray(x).dtype.type, unicode_)):
return unicode_
return string_
@@ -82,7 +74,7 @@ def _clean_args(*args):
Many of the Python string operations that have optional arguments
do not use 'None' to indicate a default value. In these cases,
- we need to remove all `None` arguments, and those following them.
+ we need to remove all None arguments, and those following them.
"""
newargs = []
for chk in args:
@@ -283,9 +275,12 @@ def str_len(a):
See also
--------
- __builtin__.len
+ builtins.len
"""
- return _vec_string(a, integer, '__len__')
+ # Note: __len__, etc. currently return ints, which are not C-integers.
+ # Generally intp would be expected for lengths, although int is sufficient
+ # due to the dtype itemsize limitation.
+ return _vec_string(a, int_, '__len__')
@array_function_dispatch(_binary_op_dispatcher)
@@ -345,7 +340,7 @@ def multiply(a, i):
i_arr = numpy.asarray(i)
if not issubclass(i_arr.dtype.type, integer):
raise ValueError("Can only multiply by integers")
- out_size = _get_num_chars(a_arr) * max(long(i_arr.max()), 0)
+ out_size = _get_num_chars(a_arr) * max(int(i_arr.max()), 0)
return _vec_string(
a_arr, (a_arr.dtype.type, out_size), '__mul__', (i_arr,))
@@ -358,7 +353,7 @@ def _mod_dispatcher(a, values):
def mod(a, values):
"""
Return (a % i), that is pre-Python 2.6 string formatting
- (iterpolation), element-wise for a pair of array_likes of str
+ (interpolation), element-wise for a pair of array_likes of str
or unicode.
Parameters
@@ -455,7 +450,7 @@ def center(a, width, fillchar=' '):
"""
a_arr = numpy.asarray(a)
width_arr = numpy.asarray(width)
- size = long(numpy.max(width_arr.flat))
+ size = int(numpy.max(width_arr.flat))
if numpy.issubdtype(a_arr.dtype, numpy.string_):
fillchar = asbytes(fillchar)
return _vec_string(
@@ -509,7 +504,7 @@ def count(a, sub, start=0, end=None):
array([1, 0, 0])
"""
- return _vec_string(a, integer, 'count', [sub, start] + _clean_args(end))
+ return _vec_string(a, int_, 'count', [sub, start] + _clean_args(end))
def _code_dispatcher(a, encoding=None, errors=None):
@@ -719,7 +714,7 @@ def find(a, sub, start=0, end=None):
"""
return _vec_string(
- a, integer, 'find', [sub, start] + _clean_args(end))
+ a, int_, 'find', [sub, start] + _clean_args(end))
@array_function_dispatch(_count_dispatcher)
@@ -748,7 +743,7 @@ def index(a, sub, start=0, end=None):
"""
return _vec_string(
- a, integer, 'index', [sub, start] + _clean_args(end))
+ a, int_, 'index', [sub, start] + _clean_args(end))
@array_function_dispatch(_unary_op_dispatcher)
@@ -1000,7 +995,7 @@ def ljust(a, width, fillchar=' '):
"""
a_arr = numpy.asarray(a)
width_arr = numpy.asarray(width)
- size = long(numpy.max(width_arr.flat))
+ size = int(numpy.max(width_arr.flat))
if numpy.issubdtype(a_arr.dtype, numpy.string_):
fillchar = asbytes(fillchar)
return _vec_string(
@@ -1208,7 +1203,7 @@ def rfind(a, sub, start=0, end=None):
"""
return _vec_string(
- a, integer, 'rfind', [sub, start] + _clean_args(end))
+ a, int_, 'rfind', [sub, start] + _clean_args(end))
@array_function_dispatch(_count_dispatcher)
@@ -1238,7 +1233,7 @@ def rindex(a, sub, start=0, end=None):
"""
return _vec_string(
- a, integer, 'rindex', [sub, start] + _clean_args(end))
+ a, int_, 'rindex', [sub, start] + _clean_args(end))
@array_function_dispatch(_just_dispatcher)
@@ -1270,7 +1265,7 @@ def rjust(a, width, fillchar=' '):
"""
a_arr = numpy.asarray(a)
width_arr = numpy.asarray(width)
- size = long(numpy.max(width_arr.flat))
+ size = int(numpy.max(width_arr.flat))
if numpy.issubdtype(a_arr.dtype, numpy.string_):
fillchar = asbytes(fillchar)
return _vec_string(
@@ -1333,7 +1328,7 @@ def rsplit(a, sep=None, maxsplit=None):
a : array_like of str or unicode
sep : str or unicode, optional
- If `sep` is not specified or `None`, any whitespace string
+ If `sep` is not specified or None, any whitespace string
is a separator.
maxsplit : int, optional
If `maxsplit` is given, at most `maxsplit` splits are done,
@@ -1417,7 +1412,7 @@ def split(a, sep=None, maxsplit=None):
a : array_like of str or unicode
sep : str or unicode, optional
- If `sep` is not specified or `None`, any whitespace string is a
+ If `sep` is not specified or None, any whitespace string is a
separator.
maxsplit : int, optional
@@ -1738,7 +1733,7 @@ def zfill(a, width):
"""
a_arr = numpy.asarray(a)
width_arr = numpy.asarray(width)
- size = long(numpy.max(width_arr.flat))
+ size = int(numpy.max(width_arr.flat))
return _vec_string(
a_arr, (a_arr.dtype.type, size), 'zfill', (width_arr,))
@@ -1917,7 +1912,7 @@ class chararray(ndarray):
unicode : bool, optional
Are the array elements of type unicode (True) or string (False).
Default is False.
- buffer : int, optional
+ buffer : object exposing the buffer interface or str, optional
Memory address of the start of the array data. Default is None,
in which case a new array is created.
offset : int, optional
@@ -1957,13 +1952,13 @@ class chararray(ndarray):
else:
dtype = string_
- # force itemsize to be a Python long, since using NumPy integer
+ # force itemsize to be a Python int, since using NumPy integer
# types results in itemsize.itemsize being used as the size of
# strings in the new array.
- itemsize = long(itemsize)
+ itemsize = int(itemsize)
- if sys.version_info[0] >= 3 and isinstance(buffer, _unicode):
- # On Py3, unicode objects do not have the buffer interface
+ if isinstance(buffer, str):
+ # unicode objects do not have the buffer interface
filler = buffer
buffer = None
else:
@@ -1993,7 +1988,7 @@ class chararray(ndarray):
if isinstance(val, character):
temp = val.rstrip()
- if _len(temp) == 0:
+ if len(temp) == 0:
val = ''
else:
val = temp
@@ -2112,7 +2107,7 @@ class chararray(ndarray):
def __mod__(self, i):
"""
Return (self % i), that is pre-Python 2.6 string formatting
- (iterpolation), element-wise for a pair of array_likes of `string_`
+ (interpolation), element-wise for a pair of array_likes of `string_`
or `unicode_`.
See also
@@ -2659,7 +2654,7 @@ def array(obj, itemsize=None, copy=True, unicode=None, order=None):
unicode : bool, optional
When true, the resulting `chararray` can contain Unicode
characters, when false only 8-bit characters. If unicode is
- `None` and `obj` is one of the following:
+ None and `obj` is one of the following:
- a `chararray`,
- an ndarray of type `str` or `unicode`
@@ -2677,35 +2672,16 @@ def array(obj, itemsize=None, copy=True, unicode=None, order=None):
be in any order (either C-, Fortran-contiguous, or even
discontiguous).
"""
- if isinstance(obj, (_bytes, _unicode)):
+ if isinstance(obj, (bytes, str)):
if unicode is None:
- if isinstance(obj, _unicode):
+ if isinstance(obj, str):
unicode = True
else:
unicode = False
if itemsize is None:
- itemsize = _len(obj)
- shape = _len(obj) // itemsize
-
- if unicode:
- if sys.maxunicode == 0xffff:
- # On a narrow Python build, the buffer for Unicode
- # strings is UCS2, which doesn't match the buffer for
- # NumPy Unicode types, which is ALWAYS UCS4.
- # Therefore, we need to convert the buffer. On Python
- # 2.6 and later, we can use the utf_32 codec. Earlier
- # versions don't have that codec, so we convert to a
- # numerical array that matches the input buffer, and
- # then use NumPy to convert it to UCS4. All of this
- # should happen in native endianness.
- obj = obj.encode('utf_32')
- else:
- obj = _unicode(obj)
- else:
- # Let the default Unicode -> string encoding (if any) take
- # precedence.
- obj = _bytes(obj)
+ itemsize = len(obj)
+ shape = len(obj) // itemsize
return chararray(shape, itemsize=itemsize, unicode=unicode,
buffer=obj, order=order)
@@ -2744,7 +2720,7 @@ def array(obj, itemsize=None, copy=True, unicode=None, order=None):
(itemsize != obj.itemsize) or
(not unicode and isinstance(obj, unicode_)) or
(unicode and isinstance(obj, string_))):
- obj = obj.astype((dtype, long(itemsize)))
+ obj = obj.astype((dtype, int(itemsize)))
return obj
if isinstance(obj, ndarray) and issubclass(obj.dtype.type, object):
@@ -2799,7 +2775,7 @@ def asarray(obj, itemsize=None, unicode=None, order=None):
unicode : bool, optional
When true, the resulting `chararray` can contain Unicode
characters, when false only 8-bit characters. If unicode is
- `None` and `obj` is one of the following:
+ None and `obj` is one of the following:
- a `chararray`,
- an ndarray of type `str` or 'unicode`
diff --git a/numpy/core/einsumfunc.py b/numpy/core/einsumfunc.py
index 3412c3fd5..a1e2efdb4 100644
--- a/numpy/core/einsumfunc.py
+++ b/numpy/core/einsumfunc.py
@@ -2,11 +2,8 @@
Implementation of optimized einsum.
"""
-from __future__ import division, absolute_import, print_function
-
import itertools
-from numpy.compat import basestring
from numpy.core.multiarray import c_einsum
from numpy.core.numeric import asanyarray, tensordot
from numpy.core.overrides import array_function_dispatch
@@ -552,7 +549,7 @@ def _parse_einsum_input(operands):
if len(operands) == 0:
raise ValueError("No input operands")
- if isinstance(operands[0], basestring):
+ if isinstance(operands[0], str):
subscripts = operands[0].replace(" ", "")
operands = [asanyarray(v) for v in operands[1:]]
@@ -691,7 +688,7 @@ def _parse_einsum_input(operands):
return (input_subscripts, output_subscript, operands)
-def _einsum_path_dispatcher(*operands, **kwargs):
+def _einsum_path_dispatcher(*operands, optimize=None, einsum_call=None):
# NOTE: technically, we should only dispatch on array-like arguments, not
# subscripts (given as strings). But separating operands into
# arrays/subscripts is a little tricky/slow (given einsum's two supported
@@ -702,7 +699,7 @@ def _einsum_path_dispatcher(*operands, **kwargs):
@array_function_dispatch(_einsum_path_dispatcher, module='numpy')
-def einsum_path(*operands, **kwargs):
+def einsum_path(*operands, optimize='greedy', einsum_call=False):
"""
einsum_path(subscripts, *operands, optimize='greedy')
@@ -812,16 +809,8 @@ def einsum_path(*operands, **kwargs):
5 defg,hd->efgh efgh->efgh
"""
- # Make sure all keywords are valid
- valid_contract_kwargs = ['optimize', 'einsum_call']
- unknown_kwargs = [k for (k, v) in kwargs.items() if k
- not in valid_contract_kwargs]
- if len(unknown_kwargs):
- raise TypeError("Did not understand the following kwargs:"
- " %s" % unknown_kwargs)
-
# Figure out what the path really is
- path_type = kwargs.pop('optimize', True)
+ path_type = optimize
if path_type is True:
path_type = 'greedy'
if path_type is None:
@@ -830,7 +819,7 @@ def einsum_path(*operands, **kwargs):
memory_limit = None
# No optimization or a named path algorithm
- if (path_type is False) or isinstance(path_type, basestring):
+ if (path_type is False) or isinstance(path_type, str):
pass
# Given an explicit path
@@ -838,7 +827,7 @@ def einsum_path(*operands, **kwargs):
pass
# Path tuple with memory limit
- elif ((len(path_type) == 2) and isinstance(path_type[0], basestring) and
+ elif ((len(path_type) == 2) and isinstance(path_type[0], str) and
isinstance(path_type[1], (int, float))):
memory_limit = int(path_type[1])
path_type = path_type[0]
@@ -847,7 +836,7 @@ def einsum_path(*operands, **kwargs):
raise TypeError("Did not understand the path: %s" % str(path_type))
# Hidden option, only einsum should call this
- einsum_call_arg = kwargs.pop("einsum_call", False)
+ einsum_call_arg = einsum_call
# Python side parsing
input_subscripts, output_subscript, operands = _parse_einsum_input(operands)
@@ -992,17 +981,16 @@ def einsum_path(*operands, **kwargs):
return (path, path_print)
-def _einsum_dispatcher(*operands, **kwargs):
+def _einsum_dispatcher(*operands, out=None, optimize=None, **kwargs):
# Arguably we dispatch on more arguments that we really should; see note in
# _einsum_path_dispatcher for why.
- for op in operands:
- yield op
- yield kwargs.get('out')
+ yield from operands
+ yield out
# Rewrite einsum to handle different cases
@array_function_dispatch(_einsum_dispatcher, module='numpy')
-def einsum(*operands, **kwargs):
+def einsum(*operands, out=None, optimize=False, **kwargs):
"""
einsum(subscripts, *operands, out=None, dtype=None, order='K',
casting='safe', optimize=False)
@@ -1347,39 +1335,29 @@ def einsum(*operands, **kwargs):
... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=path)
"""
-
- # Grab non-einsum kwargs; do not optimize by default.
- optimize_arg = kwargs.pop('optimize', False)
+ # Special handling if out is specified
+ specified_out = out is not None
# If no optimization, run pure einsum
- if optimize_arg is False:
+ if optimize is False:
+ if specified_out:
+ kwargs['out'] = out
return c_einsum(*operands, **kwargs)
- valid_einsum_kwargs = ['out', 'dtype', 'order', 'casting']
- einsum_kwargs = {k: v for (k, v) in kwargs.items() if
- k in valid_einsum_kwargs}
-
- # Make sure all keywords are valid
- valid_contract_kwargs = ['optimize'] + valid_einsum_kwargs
+ # Check the kwargs to avoid a more cryptic error later, without having to
+ # repeat default values here
+ valid_einsum_kwargs = ['dtype', 'order', 'casting']
unknown_kwargs = [k for (k, v) in kwargs.items() if
- k not in valid_contract_kwargs]
-
+ k not in valid_einsum_kwargs]
if len(unknown_kwargs):
raise TypeError("Did not understand the following kwargs: %s"
% unknown_kwargs)
- # Special handeling if out is specified
- specified_out = False
- out_array = einsum_kwargs.pop('out', None)
- if out_array is not None:
- specified_out = True
# Build the contraction list and operand
- operands, contraction_list = einsum_path(*operands, optimize=optimize_arg,
+ operands, contraction_list = einsum_path(*operands, optimize=optimize,
einsum_call=True)
- handle_out = False
-
# Start contraction loop
for num, contraction in enumerate(contraction_list):
inds, idx_rm, einsum_str, remaining, blas = contraction
@@ -1410,23 +1388,23 @@ def einsum(*operands, **kwargs):
# Build a new view if needed
if (tensor_result != results_index) or handle_out:
if handle_out:
- einsum_kwargs["out"] = out_array
- new_view = c_einsum(tensor_result + '->' + results_index, new_view, **einsum_kwargs)
+ kwargs["out"] = out
+ new_view = c_einsum(tensor_result + '->' + results_index, new_view, **kwargs)
# Call einsum
else:
# If out was specified
if handle_out:
- einsum_kwargs["out"] = out_array
+ kwargs["out"] = out
# Do the contraction
- new_view = c_einsum(einsum_str, *tmp_operands, **einsum_kwargs)
+ new_view = c_einsum(einsum_str, *tmp_operands, **kwargs)
# Append new items and dereference what we can
operands.append(new_view)
del tmp_operands, new_view
if specified_out:
- return out_array
+ return out
else:
return operands[0]
diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py
index 6c0b9cde9..b32ad8d35 100644
--- a/numpy/core/fromnumeric.py
+++ b/numpy/core/fromnumeric.py
@@ -1,14 +1,11 @@
"""Module containing non-deprecated functions borrowed from Numeric.
"""
-from __future__ import division, absolute_import, print_function
-
import functools
import types
import warnings
import numpy as np
-from .. import VisibleDeprecationWarning
from . import multiarray as mu
from . import overrides
from . import umath as um
@@ -255,7 +252,8 @@ def reshape(a, newshape, order='C'):
>>> c.shape = (20)
Traceback (most recent call last):
...
- AttributeError: incompatible shape for a non-contiguous array
+ AttributeError: Incompatible shape for in-place modification. Use
+ `.reshape()` to make a copy with the desired shape.
The `order` keyword gives the index ordering both for *fetching* the values
from `a`, and then *placing* the values into the output array.
@@ -303,8 +301,7 @@ def reshape(a, newshape, order='C'):
def _choose_dispatcher(a, choices, out=None, mode=None):
yield a
- for c in choices:
- yield c
+ yield from choices
yield out
@@ -604,15 +601,20 @@ def _transpose_dispatcher(a, axes=None):
@array_function_dispatch(_transpose_dispatcher)
def transpose(a, axes=None):
"""
- Permute the dimensions of an array.
+ Reverse or permute the axes of an array; returns the modified array.
+
+ For an array a with two axes, transpose(a) gives the matrix transpose.
Parameters
----------
a : array_like
Input array.
- axes : list of ints, optional
- By default, reverse the dimensions, otherwise permute the axes
- according to the values given.
+ axes : tuple or list of ints, optional
+ If specified, it must be a tuple or list which contains a permutation of
+ [0,1,..,N-1] where N is the number of axes of a. The i'th axis of the
+ returned array will correspond to the axis numbered ``axes[i]`` of the
+ input. If not specified, defaults to ``range(a.ndim)[::-1]``, which
+ reverses the order of the axes.
Returns
-------
@@ -796,7 +798,9 @@ def argpartition(a, kth, axis=-1, kind='introselect', order=None):
--------
partition : Describes partition algorithms used.
ndarray.partition : Inplace partition.
- argsort : Full indirect sort
+ argsort : Full indirect sort.
+ take_along_axis : Apply ``index_array`` from argpartition
+ to an array as if by calling partition.
Notes
-----
@@ -816,6 +820,14 @@ def argpartition(a, kth, axis=-1, kind='introselect', order=None):
>>> np.array(x)[np.argpartition(x, 3)]
array([2, 1, 3, 4])
+ Multi-dimensional array:
+
+ >>> x = np.array([[3, 4, 2], [1, 3, 1]])
+ >>> index_array = np.argpartition(x, kth=1, axis=-1)
+ >>> np.take_along_axis(x, index_array, axis=-1) # same as np.partition(x, kth=1)
+ array([[2, 3, 4],
+ [1, 1, 3]])
+
"""
return _wrapfunc(a, 'argpartition', kth, axis=axis, kind=kind, order=order)
@@ -934,6 +946,10 @@ def sort(a, axis=-1, kind=None, order=None):
'mergesort' and 'stable' are mapped to radix sort for integer data types. Radix sort is an
O(n) sort instead of O(n log n).
+ .. versionchanged:: 1.18.0
+
+ NaT now sorts to the end of arrays for consistency with NaN.
+
Examples
--------
>>> a = np.array([[1,4],[3,1]])
@@ -1025,6 +1041,8 @@ def argsort(a, axis=-1, kind=None, order=None):
lexsort : Indirect stable sort with multiple keys.
ndarray.sort : Inplace sort.
argpartition : Indirect partial sort.
+ take_along_axis : Apply ``index_array`` from argsort
+ to an array as if by calling sort.
Notes
-----
@@ -1120,6 +1138,8 @@ def argmax(a, axis=None, out=None):
ndarray.argmax, argmin
amax : The maximum value along a given axis.
unravel_index : Convert a flat index into an index tuple.
+ take_along_axis : Apply ``np.expand_dims(index_array, axis)``
+ from argmax to an array as if by calling max.
Notes
-----
@@ -1154,6 +1174,16 @@ def argmax(a, axis=None, out=None):
>>> np.argmax(b) # Only the first occurrence is returned.
1
+ >>> x = np.array([[4,2,3], [1,0,3]])
+ >>> index_array = np.argmax(x, axis=-1)
+ >>> # Same as np.max(x, axis=-1, keepdims=True)
+ >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1)
+ array([[4],
+ [3]])
+ >>> # Same as np.max(x, axis=-1)
+ >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1).squeeze(axis=-1)
+ array([4, 3])
+
"""
return _wrapfunc(a, 'argmax', axis=axis, out=out)
@@ -1189,6 +1219,8 @@ def argmin(a, axis=None, out=None):
ndarray.argmin, argmax
amin : The minimum value along a given axis.
unravel_index : Convert a flat index into an index tuple.
+ take_along_axis : Apply ``np.expand_dims(index_array, axis)``
+ from argmin to an array as if by calling min.
Notes
-----
@@ -1223,6 +1255,16 @@ def argmin(a, axis=None, out=None):
>>> np.argmin(b) # Only the first occurrence is returned.
0
+ >>> x = np.array([[4,2,3], [1,0,3]])
+ >>> index_array = np.argmin(x, axis=-1)
+ >>> # Same as np.min(x, axis=-1, keepdims=True)
+ >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1)
+ array([[2],
+ [0]])
+ >>> # Same as np.min(x, axis=-1)
+ >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1).squeeze(axis=-1)
+ array([2, 0])
+
"""
return _wrapfunc(a, 'argmin', axis=axis, out=out)
@@ -1404,12 +1446,13 @@ def squeeze(a, axis=None):
squeezed : ndarray
The input array, but with all or a subset of the
dimensions of length 1 removed. This is always `a` itself
- or a view into `a`.
+ or a view into `a`. Note that if all axes are squeezed,
+ the result is a 0d array and not a scalar.
Raises
------
ValueError
- If `axis` is not `None`, and an axis being squeezed is not of length 1
+ If `axis` is not None, and an axis being squeezed is not of length 1
See Also
--------
@@ -1431,6 +1474,15 @@ def squeeze(a, axis=None):
ValueError: cannot select an axis to squeeze out which has size not equal to one
>>> np.squeeze(x, axis=2).shape
(1, 3)
+ >>> x = np.array([[1234]])
+ >>> x.shape
+ (1, 1)
+ >>> np.squeeze(x)
+ array(1234) # 0d array
+ >>> np.squeeze(x).shape
+ ()
+ >>> np.squeeze(x)[()]
+ 1234
"""
try:
@@ -1945,7 +1997,7 @@ def compress(condition, a, axis=None, out=None):
take, choose, diag, diagonal, select
ndarray.compress : Equivalent method in ndarray
np.extract: Equivalent method when working on 1-D arrays
- numpy.doc.ufuncs : Section "Output arguments"
+ ufuncs-output-type
Examples
--------
@@ -1988,21 +2040,22 @@ def clip(a, a_min, a_max, out=None, **kwargs):
is specified, values smaller than 0 become 0, and values larger
than 1 become 1.
- Equivalent to but faster than ``np.maximum(a_min, np.minimum(a, a_max))``.
+ Equivalent to but faster than ``np.minimum(a_max, np.maximum(a, a_min))``.
+
No check is performed to ensure ``a_min < a_max``.
Parameters
----------
a : array_like
Array containing elements to clip.
- a_min : scalar or array_like or `None`
- Minimum value. If `None`, clipping is not performed on lower
+ a_min : scalar or array_like or None
+ Minimum value. If None, clipping is not performed on lower
interval edge. Not more than one of `a_min` and `a_max` may be
- `None`.
- a_max : scalar or array_like or `None`
- Maximum value. If `None`, clipping is not performed on upper
+ None.
+ a_max : scalar or array_like or None
+ Maximum value. If None, clipping is not performed on upper
interval edge. Not more than one of `a_min` and `a_max` may be
- `None`. If `a_min` or `a_max` are array_like, then the three
+ None. If `a_min` or `a_max` are array_like, then the three
arrays will be broadcasted to match their shapes.
out : ndarray, optional
The results will be placed in this array. It may be the input
@@ -2023,7 +2076,7 @@ def clip(a, a_min, a_max, out=None, **kwargs):
See Also
--------
- numpy.doc.ufuncs : Section "Output arguments"
+ ufuncs-output-type
Examples
--------
@@ -2206,7 +2259,7 @@ def any(a, axis=None, out=None, keepdims=np._NoValue):
Input array or object that can be converted to an array.
axis : None or int or tuple of ints, optional
Axis or axes along which a logical OR reduction is performed.
- The default (`axis` = `None`) is to perform a logical OR over all
+ The default (``axis=None``) is to perform a logical OR over all
the dimensions of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
@@ -2219,7 +2272,7 @@ def any(a, axis=None, out=None, keepdims=np._NoValue):
the same shape as the expected output and its type is preserved
(e.g., if it is of type float, then it will remain so, returning
1.0 for True and 0.0 for False, regardless of the type of `a`).
- See `doc.ufuncs` (Section "Output arguments") for details.
+ See `ufuncs-output-type` for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
@@ -2292,7 +2345,7 @@ def all(a, axis=None, out=None, keepdims=np._NoValue):
Input array or object that can be converted to an array.
axis : None or int or tuple of ints, optional
Axis or axes along which a logical AND reduction is performed.
- The default (`axis` = `None`) is to perform a logical AND over all
+ The default (``axis=None``) is to perform a logical AND over all
the dimensions of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
@@ -2304,8 +2357,8 @@ def all(a, axis=None, out=None, keepdims=np._NoValue):
Alternate output array in which to place the result.
It must have the same shape as the expected output and its
type is preserved (e.g., if ``dtype(out)`` is float, the result
- will consist of 0.0's and 1.0's). See `doc.ufuncs` (Section
- "Output arguments") for more details.
+ will consist of 0.0's and 1.0's). See `ufuncs-output-type` for more
+ details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
@@ -2383,8 +2436,8 @@ def cumsum(a, axis=None, dtype=None, out=None):
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
- but the type will be cast if necessary. See `doc.ufuncs`
- (Section "Output arguments") for more details.
+ but the type will be cast if necessary. See `ufuncs-output-type` for
+ more details.
Returns
-------
@@ -2529,7 +2582,7 @@ def amax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
- See `doc.ufuncs` (Section "Output arguments") for more details.
+ See `ufuncs-output-type` for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
@@ -2654,7 +2707,7 @@ def amin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
- See `doc.ufuncs` (Section "Output arguments") for more details.
+ See `ufuncs-output-type` for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
@@ -2861,7 +2914,7 @@ def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
See Also
--------
ndarray.prod : equivalent method
- numpy.doc.ufuncs : Section "Output arguments"
+ ufuncs-output-type
Notes
-----
@@ -2957,7 +3010,7 @@ def cumprod(a, axis=None, dtype=None, out=None):
See Also
--------
- numpy.doc.ufuncs : Section "Output arguments"
+ ufuncs-output-type
Notes
-----
@@ -3103,8 +3156,8 @@ def around(a, decimals=0, out=None):
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output, but the type of the output
- values will be cast if necessary. See `doc.ufuncs` (Section
- "Output arguments") for details.
+ values will be cast if necessary. See `ufuncs-output-type` for more
+ details.
Returns
-------
@@ -3218,7 +3271,7 @@ def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the
expected output, but the type will be cast if necessary.
- See `doc.ufuncs` for details.
+ See `ufuncs-output-type` for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
@@ -3353,7 +3406,7 @@ def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
See Also
--------
var, mean, nanmean, nanstd, nanvar
- numpy.doc.ufuncs : Section "Output arguments"
+ ufuncs-output-type
Notes
-----
@@ -3478,7 +3531,7 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
See Also
--------
std, mean, nanmean, nanstd, nanvar
- numpy.doc.ufuncs : Section "Output arguments"
+ ufuncs-output-type
Notes
-----
diff --git a/numpy/core/function_base.py b/numpy/core/function_base.py
index 42604ec3f..6d49b9055 100644
--- a/numpy/core/function_base.py
+++ b/numpy/core/function_base.py
@@ -1,13 +1,10 @@
-from __future__ import division, absolute_import, print_function
-
import functools
import warnings
import operator
import types
from . import numeric as _nx
-from .numeric import (result_type, NaN, shares_memory, MAY_SHARE_BOUNDS,
- TooHardError, asanyarray, ndim)
+from .numeric import result_type, NaN, asanyarray, ndim
from numpy.core.multiarray import add_docstring
from numpy.core import overrides
@@ -139,7 +136,7 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None,
# from overriding what class is produced, and thus prevents, e.g. use of Quantities,
# see gh-7142. Hence, we multiply in place only for standard scalar types.
_mult_inplace = _nx.isscalar(delta)
- if num > 1:
+ if div > 0:
step = delta / div
if _nx.any(step == 0):
# Special handling for denormal numbers, gh-5437
@@ -154,7 +151,8 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None,
else:
y = y * step
else:
- # 0 and 1 item long sequences have an undefined step
+ # sequences with 0 items or 1 item with endpoint=True (i.e. div <= 0)
+ # have an undefined step
step = NaN
# Multiply with delta to allow possible override of output class.
y = y * delta
diff --git a/numpy/core/getlimits.py b/numpy/core/getlimits.py
index 31fa6b9bf..e2ff49393 100644
--- a/numpy/core/getlimits.py
+++ b/numpy/core/getlimits.py
@@ -1,8 +1,6 @@
"""Machine limits for Float32 and Float64 and (long double) if available...
"""
-from __future__ import division, absolute_import, print_function
-
__all__ = ['finfo', 'iinfo']
import warnings
@@ -31,12 +29,12 @@ def _fr1(a):
a.shape = ()
return a
-class MachArLike(object):
+class MachArLike:
""" Object to simulate MachAr instance """
def __init__(self,
ftype,
- **kwargs):
+ *, eps, epsneg, huge, tiny, ibeta, **kwargs):
params = _MACHAR_PARAMS[ftype]
float_conv = lambda v: array([v], ftype)
float_to_float = lambda v : _fr1(float_conv(v))
@@ -44,11 +42,11 @@ class MachArLike(object):
self.title = params['title']
# Parameter types same as for discovered MachAr object.
- self.epsilon = self.eps = float_to_float(kwargs.pop('eps'))
- self.epsneg = float_to_float(kwargs.pop('epsneg'))
- self.xmax = self.huge = float_to_float(kwargs.pop('huge'))
- self.xmin = self.tiny = float_to_float(kwargs.pop('tiny'))
- self.ibeta = params['itype'](kwargs.pop('ibeta'))
+ self.epsilon = self.eps = float_to_float(eps)
+ self.epsneg = float_to_float(epsneg)
+ self.xmax = self.huge = float_to_float(huge)
+ self.xmin = self.tiny = float_to_float(tiny)
+ self.ibeta = params['itype'](ibeta)
self.__dict__.update(kwargs)
self.precision = int(-log10(self.eps))
self.resolution = float_to_float(float_conv(10) ** (-self.precision))
@@ -291,7 +289,7 @@ def _discovered_machar(ftype):
@set_module('numpy')
-class finfo(object):
+class finfo:
"""
finfo(dtype)
@@ -302,12 +300,13 @@ class finfo(object):
bits : int
The number of bits occupied by the type.
eps : float
- The smallest representable positive number such that
- ``1.0 + eps != 1.0``. Type of `eps` is an appropriate floating
- point type.
- epsneg : floating point number of the appropriate type
- The smallest representable positive number such that
- ``1.0 - epsneg != 1.0``.
+ The difference between 1.0 and the next smallest representable float
+ larger than 1.0. For example, for 64-bit binary floats in the IEEE-754
+ standard, ``eps = 2**-52``, approximately 2.22e-16.
+ epsneg : float
+ The difference between 1.0 and the next smallest representable float
+ less than 1.0. For example, for 64-bit binary floats in the IEEE-754
+ standard, ``epsneg = 2**-53``, approximately 1.11e-16.
iexp : int
The number of bits in the exponent portion of the floating point
representation.
@@ -350,6 +349,8 @@ class finfo(object):
--------
MachAr : The implementation of the tests that produce this information.
iinfo : The equivalent for integer data types.
+ spacing : The distance between a value and the nearest adjacent number
+ nextafter : The next floating point value after x1 towards x2
Notes
-----
@@ -442,7 +443,7 @@ class finfo(object):
@set_module('numpy')
-class iinfo(object):
+class iinfo:
"""
iinfo(type)
diff --git a/numpy/core/include/numpy/arrayscalars.h b/numpy/core/include/numpy/arrayscalars.h
index 64450e713..42a0df76a 100644
--- a/numpy/core/include/numpy/arrayscalars.h
+++ b/numpy/core/include/numpy/arrayscalars.h
@@ -135,7 +135,13 @@ typedef struct {
} PyScalarObject;
#define PyStringScalarObject PyStringObject
-#define PyUnicodeScalarObject PyUnicodeObject
+#define PyStringScalarObject PyStringObject
+typedef struct {
+ /* note that the PyObject_HEAD macro lives right here */
+ PyUnicodeObject base;
+ Py_UCS4 *obval;
+} PyUnicodeScalarObject;
+
typedef struct {
PyObject_VAR_HEAD
diff --git a/numpy/core/include/numpy/ndarrayobject.h b/numpy/core/include/numpy/ndarrayobject.h
index 95e9cb060..b18d75f35 100644
--- a/numpy/core/include/numpy/ndarrayobject.h
+++ b/numpy/core/include/numpy/ndarrayobject.h
@@ -45,7 +45,6 @@ extern "C" {
#define PyArray_CheckScalar(m) (PyArray_IsScalar(m, Generic) || \
PyArray_IsZeroDim(m))
-#if PY_MAJOR_VERSION >= 3
#define PyArray_IsPythonNumber(obj) \
(PyFloat_Check(obj) || PyComplex_Check(obj) || \
PyLong_Check(obj) || PyBool_Check(obj))
@@ -54,17 +53,6 @@ extern "C" {
#define PyArray_IsPythonScalar(obj) \
(PyArray_IsPythonNumber(obj) || PyBytes_Check(obj) || \
PyUnicode_Check(obj))
-#else
-#define PyArray_IsPythonNumber(obj) \
- (PyInt_Check(obj) || PyFloat_Check(obj) || PyComplex_Check(obj) || \
- PyLong_Check(obj) || PyBool_Check(obj))
-#define PyArray_IsIntegerScalar(obj) (PyInt_Check(obj) \
- || PyLong_Check(obj) \
- || PyArray_IsScalar((obj), Integer))
-#define PyArray_IsPythonScalar(obj) \
- (PyArray_IsPythonNumber(obj) || PyString_Check(obj) || \
- PyUnicode_Check(obj))
-#endif
#define PyArray_IsAnyScalar(obj) \
(PyArray_IsScalar(obj, Generic) || PyArray_IsPythonScalar(obj))
@@ -248,11 +236,6 @@ NPY_TITLE_KEY_check(PyObject *key, PyObject *value)
if (PyUnicode_Check(title) && PyUnicode_Check(key)) {
return PyUnicode_Compare(title, key) == 0 ? 1 : 0;
}
-#if PY_VERSION_HEX < 0x03000000
- if (PyString_Check(title) && PyString_Check(key)) {
- return PyObject_Compare(title, key) == 0 ? 1 : 0;
- }
-#endif
#endif
return 0;
}
diff --git a/numpy/core/include/numpy/ndarraytypes.h b/numpy/core/include/numpy/ndarraytypes.h
index ad98d562b..bec6fcf30 100644
--- a/numpy/core/include/numpy/ndarraytypes.h
+++ b/numpy/core/include/numpy/ndarraytypes.h
@@ -353,21 +353,12 @@ struct NpyAuxData_tag {
#define NPY_USE_PYMEM 1
+
#if NPY_USE_PYMEM == 1
- /* numpy sometimes calls PyArray_malloc() with the GIL released. On Python
- 3.3 and older, it was safe to call PyMem_Malloc() with the GIL released.
- On Python 3.4 and newer, it's better to use PyMem_RawMalloc() to be able
- to use tracemalloc. On Python 3.6, calling PyMem_Malloc() with the GIL
- released is now a fatal error in debug mode. */
-# if PY_VERSION_HEX >= 0x03040000
-# define PyArray_malloc PyMem_RawMalloc
-# define PyArray_free PyMem_RawFree
-# define PyArray_realloc PyMem_RawRealloc
-# else
-# define PyArray_malloc PyMem_Malloc
-# define PyArray_free PyMem_Free
-# define PyArray_realloc PyMem_Realloc
-# endif
+/* use the Raw versions which are safe to call with the GIL released */
+#define PyArray_malloc PyMem_RawMalloc
+#define PyArray_free PyMem_RawFree
+#define PyArray_realloc PyMem_RawRealloc
#else
#define PyArray_malloc malloc
#define PyArray_free free
diff --git a/numpy/core/include/numpy/npy_1_7_deprecated_api.h b/numpy/core/include/numpy/npy_1_7_deprecated_api.h
index a6ee21219..440458010 100644
--- a/numpy/core/include/numpy/npy_1_7_deprecated_api.h
+++ b/numpy/core/include/numpy/npy_1_7_deprecated_api.h
@@ -69,18 +69,11 @@
#define PyArray_DEFAULT NPY_DEFAULT_TYPE
/* These DATETIME bits aren't used internally */
-#if PY_VERSION_HEX >= 0x03000000
#define PyDataType_GetDatetimeMetaData(descr) \
((descr->metadata == NULL) ? NULL : \
((PyArray_DatetimeMetaData *)(PyCapsule_GetPointer( \
PyDict_GetItemString( \
descr->metadata, NPY_METADATA_DTSTR), NULL))))
-#else
-#define PyDataType_GetDatetimeMetaData(descr) \
- ((descr->metadata == NULL) ? NULL : \
- ((PyArray_DatetimeMetaData *)(PyCObject_AsVoidPtr( \
- PyDict_GetItemString(descr->metadata, NPY_METADATA_DTSTR)))))
-#endif
/*
* Deprecated as of NumPy 1.7, this kind of shortcut doesn't
diff --git a/numpy/core/include/numpy/npy_3kcompat.h b/numpy/core/include/numpy/npy_3kcompat.h
index 832bc0599..efe196c84 100644
--- a/numpy/core/include/numpy/npy_3kcompat.h
+++ b/numpy/core/include/numpy/npy_3kcompat.h
@@ -13,11 +13,9 @@
#include <Python.h>
#include <stdio.h>
-#if PY_VERSION_HEX >= 0x03000000
#ifndef NPY_PY3K
#define NPY_PY3K 1
#endif
-#endif
#include "numpy/npy_common.h"
#include "numpy/ndarrayobject.h"
@@ -45,6 +43,7 @@ static NPY_INLINE int PyInt_Check(PyObject *op) {
#define PyInt_AsLong PyLong_AsLong
#define PyInt_AS_LONG PyLong_AsLong
#define PyInt_AsSsize_t PyLong_AsSsize_t
+#define PyNumber_Int PyNumber_Long
/* NOTE:
*
@@ -61,13 +60,7 @@ static NPY_INLINE int PyInt_Check(PyObject *op) {
PySlice_GetIndicesEx((PySliceObject *)op, nop, start, end, step, slicelength)
#endif
-/* <2.7.11 and <3.4.4 have the wrong argument type for Py_EnterRecursiveCall */
-#if (PY_VERSION_HEX < 0x02070B00) || \
- ((0x03000000 <= PY_VERSION_HEX) && (PY_VERSION_HEX < 0x03040400))
- #define Npy_EnterRecursiveCall(x) Py_EnterRecursiveCall((char *)(x))
-#else
- #define Npy_EnterRecursiveCall(x) Py_EnterRecursiveCall(x)
-#endif
+#define Npy_EnterRecursiveCall(x) Py_EnterRecursiveCall(x)
/* Py_SETREF was added in 3.5.2, and only if Py_LIMITED_API is absent */
#if PY_VERSION_HEX < 0x03050200
@@ -79,6 +72,22 @@ static NPY_INLINE int PyInt_Check(PyObject *op) {
} while (0)
#endif
+/* introduced in https://github.com/python/cpython/commit/a24107b04c1277e3c1105f98aff5bfa3a98b33a0 */
+#if PY_VERSION_HEX < 0x030800A3
+ static NPY_INLINE PyObject *
+ _PyDict_GetItemStringWithError(PyObject *v, const char *key)
+ {
+ PyObject *kv, *rv;
+ kv = PyUnicode_FromString(key);
+ if (kv == NULL) {
+ return NULL;
+ }
+ rv = PyDict_GetItemWithError(v, kv);
+ Py_DECREF(kv);
+ return rv;
+ }
+#endif
+
/*
* PyString -> PyBytes
*/
@@ -489,8 +498,6 @@ PyObject_Cmp(PyObject *i1, PyObject *i2, int *cmp)
* The main job here is to get rid of the improved error handling
* of PyCapsules. It's a shame...
*/
-#if PY_VERSION_HEX >= 0x03000000
-
static NPY_INLINE PyObject *
NpyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *))
{
@@ -535,41 +542,6 @@ NpyCapsule_Check(PyObject *ptr)
return PyCapsule_CheckExact(ptr);
}
-#else
-
-static NPY_INLINE PyObject *
-NpyCapsule_FromVoidPtr(void *ptr, void (*dtor)(void *))
-{
- return PyCObject_FromVoidPtr(ptr, dtor);
-}
-
-static NPY_INLINE PyObject *
-NpyCapsule_FromVoidPtrAndDesc(void *ptr, void* context,
- void (*dtor)(void *, void *))
-{
- return PyCObject_FromVoidPtrAndDesc(ptr, context, dtor);
-}
-
-static NPY_INLINE void *
-NpyCapsule_AsVoidPtr(PyObject *ptr)
-{
- return PyCObject_AsVoidPtr(ptr);
-}
-
-static NPY_INLINE void *
-NpyCapsule_GetDesc(PyObject *obj)
-{
- return PyCObject_GetDesc(obj);
-}
-
-static NPY_INLINE int
-NpyCapsule_Check(PyObject *ptr)
-{
- return PyCObject_Check(ptr);
-}
-
-#endif
-
#ifdef __cplusplus
}
#endif
diff --git a/numpy/core/include/numpy/npy_common.h b/numpy/core/include/numpy/npy_common.h
index 27b83f7b5..c2e755958 100644
--- a/numpy/core/include/numpy/npy_common.h
+++ b/numpy/core/include/numpy/npy_common.h
@@ -369,18 +369,8 @@ typedef long npy_long;
typedef float npy_float;
typedef double npy_double;
-/*
- * Hash value compatibility.
- * As of Python 3.2 hash values are of type Py_hash_t.
- * Previous versions use C long.
- */
-#if PY_VERSION_HEX < 0x03020000
-typedef long npy_hash_t;
-#define NPY_SIZEOF_HASH_T NPY_SIZEOF_LONG
-#else
typedef Py_hash_t npy_hash_t;
#define NPY_SIZEOF_HASH_T NPY_SIZEOF_INTP
-#endif
/*
* Disabling C99 complex usage: a lot of C code in numpy/scipy rely on being
diff --git a/numpy/core/include/numpy/numpyconfig.h b/numpy/core/include/numpy/numpyconfig.h
index ab198f36b..4df4ea438 100644
--- a/numpy/core/include/numpy/numpyconfig.h
+++ b/numpy/core/include/numpy/numpyconfig.h
@@ -37,5 +37,9 @@
#define NPY_1_13_API_VERSION 0x00000008
#define NPY_1_14_API_VERSION 0x00000008
#define NPY_1_15_API_VERSION 0x00000008
+#define NPY_1_16_API_VERSION 0x00000008
+#define NPY_1_17_API_VERSION 0x00000008
+#define NPY_1_18_API_VERSION 0x00000008
+#define NPY_1_19_API_VERSION 0x00000008
#endif
diff --git a/numpy/random/src/bitgen.h b/numpy/core/include/numpy/random/bitgen.h
index 0adaaf2ee..83c2858dd 100644
--- a/numpy/random/src/bitgen.h
+++ b/numpy/core/include/numpy/random/bitgen.h
@@ -6,7 +6,7 @@
#include <stdbool.h>
#include <stdint.h>
-/* Must match the declaration in numpy/random/common.pxd */
+/* Must match the declaration in numpy/random/<any>.pxd */
typedef struct bitgen {
void *state;
diff --git a/numpy/random/src/distributions/distributions.h b/numpy/core/include/numpy/random/distributions.h
index 2a6b2a045..c474c4d14 100644
--- a/numpy/random/src/distributions/distributions.h
+++ b/numpy/core/include/numpy/random/distributions.h
@@ -8,7 +8,7 @@
#include <stdint.h>
#include "numpy/npy_math.h"
-#include "src/bitgen.h"
+#include "numpy/random/bitgen.h"
/*
* RAND_INT_TYPE is used to share integer generators with RandomState which
@@ -24,7 +24,7 @@
#define RAND_INT_MAX INT64_MAX
#endif
-#ifdef DLL_EXPORT
+#ifdef _MSC_VER
#define DECLDIR __declspec(dllexport)
#else
#define DECLDIR extern
@@ -59,28 +59,10 @@ typedef struct s_binomial_t {
double p4;
} binomial_t;
-/* Inline generators for internal use */
-static NPY_INLINE uint32_t next_uint32(bitgen_t *bitgen_state) {
- return bitgen_state->next_uint32(bitgen_state->state);
-}
-
-static NPY_INLINE uint64_t next_uint64(bitgen_t *bitgen_state) {
- return bitgen_state->next_uint64(bitgen_state->state);
-}
-
-static NPY_INLINE float next_float(bitgen_t *bitgen_state) {
- return (next_uint32(bitgen_state) >> 9) * (1.0f / 8388608.0f);
-}
-
-static NPY_INLINE double next_double(bitgen_t *bitgen_state) {
- return bitgen_state->next_double(bitgen_state->state);
-}
-
-DECLDIR double loggam(double x);
-
-DECLDIR float random_float(bitgen_t *bitgen_state);
-DECLDIR double random_double(bitgen_t *bitgen_state);
-DECLDIR void random_double_fill(bitgen_t *bitgen_state, npy_intp cnt, double *out);
+DECLDIR float random_standard_uniform_f(bitgen_t *bitgen_state);
+DECLDIR double random_standard_uniform(bitgen_t *bitgen_state);
+DECLDIR void random_standard_uniform_fill(bitgen_t *, npy_intp, double *);
+DECLDIR void random_standard_uniform_fill_f(bitgen_t *, npy_intp, float *);
DECLDIR int64_t random_positive_int64(bitgen_t *bitgen_state);
DECLDIR int32_t random_positive_int32(bitgen_t *bitgen_state);
@@ -88,37 +70,23 @@ DECLDIR int64_t random_positive_int(bitgen_t *bitgen_state);
DECLDIR uint64_t random_uint(bitgen_t *bitgen_state);
DECLDIR double random_standard_exponential(bitgen_t *bitgen_state);
-DECLDIR void random_standard_exponential_fill(bitgen_t *bitgen_state, npy_intp cnt,
- double *out);
DECLDIR float random_standard_exponential_f(bitgen_t *bitgen_state);
-DECLDIR double random_standard_exponential_zig(bitgen_t *bitgen_state);
-DECLDIR void random_standard_exponential_zig_fill(bitgen_t *bitgen_state,
- npy_intp cnt, double *out);
-DECLDIR float random_standard_exponential_zig_f(bitgen_t *bitgen_state);
-
-/*
-DECLDIR double random_gauss(bitgen_t *bitgen_state);
-DECLDIR float random_gauss_f(bitgen_t *bitgen_state);
-*/
-DECLDIR double random_gauss_zig(bitgen_t *bitgen_state);
-DECLDIR float random_gauss_zig_f(bitgen_t *bitgen_state);
-DECLDIR void random_gauss_zig_fill(bitgen_t *bitgen_state, npy_intp cnt,
- double *out);
-
-/*
+DECLDIR void random_standard_exponential_fill(bitgen_t *, npy_intp, double *);
+DECLDIR void random_standard_exponential_fill_f(bitgen_t *, npy_intp, float *);
+DECLDIR void random_standard_exponential_inv_fill(bitgen_t *, npy_intp, double *);
+DECLDIR void random_standard_exponential_inv_fill_f(bitgen_t *, npy_intp, float *);
+
+DECLDIR double random_standard_normal(bitgen_t *bitgen_state);
+DECLDIR float random_standard_normal_f(bitgen_t *bitgen_state);
+DECLDIR void random_standard_normal_fill(bitgen_t *, npy_intp, double *);
+DECLDIR void random_standard_normal_fill_f(bitgen_t *, npy_intp, float *);
DECLDIR double random_standard_gamma(bitgen_t *bitgen_state, double shape);
DECLDIR float random_standard_gamma_f(bitgen_t *bitgen_state, float shape);
-*/
-DECLDIR double random_standard_gamma_zig(bitgen_t *bitgen_state, double shape);
-DECLDIR float random_standard_gamma_zig_f(bitgen_t *bitgen_state, float shape);
-/*
DECLDIR double random_normal(bitgen_t *bitgen_state, double loc, double scale);
-*/
-DECLDIR double random_normal_zig(bitgen_t *bitgen_state, double loc, double scale);
DECLDIR double random_gamma(bitgen_t *bitgen_state, double shape, double scale);
-DECLDIR float random_gamma_float(bitgen_t *bitgen_state, float shape, float scale);
+DECLDIR float random_gamma_f(bitgen_t *bitgen_state, float shape, float scale);
DECLDIR double random_exponential(bitgen_t *bitgen_state, double scale);
DECLDIR double random_uniform(bitgen_t *bitgen_state, double lower, double range);
@@ -146,27 +114,16 @@ DECLDIR double random_triangular(bitgen_t *bitgen_state, double left, double mod
DECLDIR RAND_INT_TYPE random_poisson(bitgen_t *bitgen_state, double lam);
DECLDIR RAND_INT_TYPE random_negative_binomial(bitgen_t *bitgen_state, double n,
- double p);
-
-DECLDIR RAND_INT_TYPE random_binomial_btpe(bitgen_t *bitgen_state,
- RAND_INT_TYPE n,
- double p,
- binomial_t *binomial);
-DECLDIR RAND_INT_TYPE random_binomial_inversion(bitgen_t *bitgen_state,
- RAND_INT_TYPE n,
- double p,
- binomial_t *binomial);
+ double p);
+
DECLDIR int64_t random_binomial(bitgen_t *bitgen_state, double p,
int64_t n, binomial_t *binomial);
DECLDIR RAND_INT_TYPE random_logseries(bitgen_t *bitgen_state, double p);
-DECLDIR RAND_INT_TYPE random_geometric_search(bitgen_t *bitgen_state, double p);
-DECLDIR RAND_INT_TYPE random_geometric_inversion(bitgen_t *bitgen_state, double p);
DECLDIR RAND_INT_TYPE random_geometric(bitgen_t *bitgen_state, double p);
DECLDIR RAND_INT_TYPE random_zipf(bitgen_t *bitgen_state, double a);
DECLDIR int64_t random_hypergeometric(bitgen_t *bitgen_state,
int64_t good, int64_t bad, int64_t sample);
-
DECLDIR uint64_t random_interval(bitgen_t *bitgen_state, uint64_t max);
/* Generate random uint64 numbers in closed interval [off, off + rng]. */
@@ -211,4 +168,33 @@ DECLDIR void random_bounded_bool_fill(bitgen_t *bitgen_state, npy_bool off,
DECLDIR void random_multinomial(bitgen_t *bitgen_state, RAND_INT_TYPE n, RAND_INT_TYPE *mnix,
double *pix, npy_intp d, binomial_t *binomial);
+/* multivariate hypergeometric, "count" method */
+DECLDIR int random_multivariate_hypergeometric_count(bitgen_t *bitgen_state,
+ int64_t total,
+ size_t num_colors, int64_t *colors,
+ int64_t nsample,
+ size_t num_variates, int64_t *variates);
+
+/* multivariate hypergeometric, "marginals" method */
+DECLDIR void random_multivariate_hypergeometric_marginals(bitgen_t *bitgen_state,
+ int64_t total,
+ size_t num_colors, int64_t *colors,
+ int64_t nsample,
+ size_t num_variates, int64_t *variates);
+
+/* Common to legacy-distributions.c and distributions.c but not exported */
+
+RAND_INT_TYPE random_binomial_btpe(bitgen_t *bitgen_state,
+ RAND_INT_TYPE n,
+ double p,
+ binomial_t *binomial);
+RAND_INT_TYPE random_binomial_inversion(bitgen_t *bitgen_state,
+ RAND_INT_TYPE n,
+ double p,
+ binomial_t *binomial);
+double random_loggam(double x);
+static NPY_INLINE double next_double(bitgen_t *bitgen_state) {
+ return bitgen_state->next_double(bitgen_state->state);
+}
+
#endif
diff --git a/numpy/core/include/numpy/ufuncobject.h b/numpy/core/include/numpy/ufuncobject.h
index 5ff4a0041..e5d845842 100644
--- a/numpy/core/include/numpy/ufuncobject.h
+++ b/numpy/core/include/numpy/ufuncobject.h
@@ -14,8 +14,8 @@ extern "C" {
*/
typedef void (*PyUFuncGenericFunction)
(char **args,
- npy_intp *dimensions,
- npy_intp *strides,
+ npy_intp const *dimensions,
+ npy_intp const *strides,
void *innerloopdata);
/*
diff --git a/numpy/core/machar.py b/numpy/core/machar.py
index 202580bdb..a48dc3d50 100644
--- a/numpy/core/machar.py
+++ b/numpy/core/machar.py
@@ -5,8 +5,6 @@ floating-point arithmetic system
Author: Pearu Peterson, September 2003
"""
-from __future__ import division, absolute_import, print_function
-
__all__ = ['MachAr']
from numpy.core.fromnumeric import any
@@ -16,7 +14,7 @@ from numpy.core.overrides import set_module
# Need to speed this up...especially for longfloat
@set_module('numpy')
-class MachAr(object):
+class MachAr:
"""
Diagnosing machine parameters.
diff --git a/numpy/core/memmap.py b/numpy/core/memmap.py
index 062645551..ad66446c2 100644
--- a/numpy/core/memmap.py
+++ b/numpy/core/memmap.py
@@ -1,9 +1,7 @@
-from __future__ import division, absolute_import, print_function
-
import numpy as np
from .numeric import uint8, ndarray, dtype
from numpy.compat import (
- long, basestring, os_fspath, contextlib_nullcontext, is_pathlib_path
+ os_fspath, contextlib_nullcontext, is_pathlib_path
)
from numpy.core.overrides import set_module
@@ -244,7 +242,7 @@ class memmap(ndarray):
for k in shape:
size *= k
- bytes = long(offset + size*_dbytes)
+ bytes = int(offset + size*_dbytes)
if mode in ('w+', 'r+') and flen < bytes:
fid.seek(bytes - 1, 0)
@@ -273,7 +271,7 @@ class memmap(ndarray):
# special case - if we were constructed with a pathlib.path,
# then filename is a path object, not a string
self.filename = filename.resolve()
- elif hasattr(fid, "name") and isinstance(fid.name, basestring):
+ elif hasattr(fid, "name") and isinstance(fid.name, str):
# py3 returns int for TemporaryFile().name
self.filename = os.path.abspath(fid.name)
# same as memmap copies (e.g. memmap + 1)
diff --git a/numpy/core/multiarray.py b/numpy/core/multiarray.py
index c0fcc10ff..e207280f0 100644
--- a/numpy/core/multiarray.py
+++ b/numpy/core/multiarray.py
@@ -7,15 +7,15 @@ by importing from the extension module.
"""
import functools
-import sys
import warnings
-import sys
from . import overrides
from . import _multiarray_umath
-import numpy as np
-from numpy.core._multiarray_umath import *
-from numpy.core._multiarray_umath import (
+from ._multiarray_umath import * # noqa: F403
+# These imports are needed for backward compatibility,
+# do not change them. issue gh-15518
+# _get_ndarray_c_version is semi-public, on purpose not added to __all__
+from ._multiarray_umath import (
_fastCopyAndTranspose, _flagdict, _insert, _reconstruct, _vec_string,
_ARRAY_API, _monotonicity, _get_ndarray_c_version
)
@@ -33,7 +33,7 @@ __all__ = [
'digitize', 'dot', 'dragon4_positional', 'dragon4_scientific', 'dtype',
'empty', 'empty_like', 'error', 'flagsobj', 'flatiter', 'format_longfloat',
'frombuffer', 'fromfile', 'fromiter', 'fromstring', 'inner',
- 'int_asbuffer', 'interp', 'interp_complex', 'is_busday', 'lexsort',
+ 'interp', 'interp_complex', 'is_busday', 'lexsort',
'matmul', 'may_share_memory', 'min_scalar_type', 'ndarray', 'nditer',
'nested_iters', 'normalize_axis_index', 'packbits',
'promote_types', 'putmask', 'ravel_multi_index', 'result_type', 'scalar',
@@ -41,8 +41,6 @@ __all__ = [
'set_string_function', 'set_typeDict', 'shares_memory', 'test_interrupt',
'tracemalloc_domain', 'typeinfo', 'unpackbits', 'unravel_index', 'vdot',
'where', 'zeros']
-if sys.version_info.major < 3:
- __all__ += ['newbuffer', 'getbuffer']
# For backward compatibility, make sure pickle imports these functions from here
_reconstruct.__module__ = 'numpy.core.multiarray'
@@ -911,8 +909,9 @@ def bincount(x, weights=None, minlength=None):
>>> np.bincount(np.arange(5, dtype=float))
Traceback (most recent call last):
- File "<stdin>", line 1, in <module>
- TypeError: array cannot be safely cast to required type
+ ...
+ TypeError: Cannot cast array data from dtype('float64') to dtype('int64')
+ according to the rule 'safe'
A possible use of ``bincount`` is to perform sums over
variable-size chunks of an array, using the ``weights`` keyword.
diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py
index 6d25f864b..83d985a7c 100644
--- a/numpy/core/numeric.py
+++ b/numpy/core/numeric.py
@@ -1,15 +1,11 @@
-from __future__ import division, absolute_import, print_function
-
import functools
import itertools
import operator
import sys
import warnings
import numbers
-import contextlib
import numpy as np
-from numpy.compat import pickle, basestring
from . import multiarray
from .multiarray import (
_fastCopyAndTranspose as fastCopyAndTranspose, ALLOW_THREADS,
@@ -17,12 +13,10 @@ from .multiarray import (
WRAP, arange, array, broadcast, can_cast, compare_chararrays,
concatenate, copyto, dot, dtype, empty,
empty_like, flatiter, frombuffer, fromfile, fromiter, fromstring,
- inner, int_asbuffer, lexsort, matmul, may_share_memory,
+ inner, lexsort, matmul, may_share_memory,
min_scalar_type, ndarray, nditer, nested_iters, promote_types,
putmask, result_type, set_numeric_ops, shares_memory, vdot, where,
zeros, normalize_axis_index)
-if sys.version_info[0] < 3:
- from .multiarray import newbuffer, getbuffer
from . import overrides
from . import umath
@@ -39,12 +33,6 @@ bitwise_not = invert
ufunc = type(sin)
newaxis = None
-if sys.version_info[0] >= 3:
- import builtins
-else:
- import __builtin__ as builtins
-
-
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
@@ -52,7 +40,7 @@ array_function_dispatch = functools.partial(
__all__ = [
'newaxis', 'ndarray', 'flatiter', 'nditer', 'nested_iters', 'ufunc',
'arange', 'array', 'zeros', 'count_nonzero', 'empty', 'broadcast', 'dtype',
- 'fromstring', 'fromfile', 'frombuffer', 'int_asbuffer', 'where',
+ 'fromstring', 'fromfile', 'frombuffer', 'where',
'argwhere', 'copyto', 'concatenate', 'fastCopyAndTranspose', 'lexsort',
'set_numeric_ops', 'can_cast', 'promote_types', 'min_scalar_type',
'result_type', 'isfortran', 'empty_like', 'zeros_like', 'ones_like',
@@ -67,9 +55,6 @@ __all__ = [
'matmul', 'shares_memory', 'may_share_memory', 'MAY_SHARE_BOUNDS',
'MAY_SHARE_EXACT', 'TooHardError', 'AxisError']
-if sys.version_info[0] < 3:
- __all__.extend(['getbuffer', 'newbuffer'])
-
@set_module('numpy')
class ComplexWarning(RuntimeWarning):
@@ -289,10 +274,10 @@ def full(shape, fill_value, dtype=None, order='C'):
----------
shape : int or sequence of ints
Shape of the new array, e.g., ``(2, 3)`` or ``2``.
- fill_value : scalar
+ fill_value : scalar or array_like
Fill value.
dtype : data-type, optional
- The desired data-type for the array The default, `None`, means
+        The desired data-type for the array. The default, None, means
`np.array(fill_value).dtype`.
order : {'C', 'F'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
@@ -319,6 +304,10 @@ def full(shape, fill_value, dtype=None, order='C'):
array([[10, 10],
[10, 10]])
+ >>> np.full((2, 2), [1, 2])
+ array([[1, 2],
+ [1, 2]])
+
"""
if dtype is None:
dtype = array(fill_value).dtype
@@ -395,12 +384,12 @@ def full_like(a, fill_value, dtype=None, order='K', subok=True, shape=None):
return res
-def _count_nonzero_dispatcher(a, axis=None):
+def _count_nonzero_dispatcher(a, axis=None, *, keepdims=None):
return (a,)
@array_function_dispatch(_count_nonzero_dispatcher)
-def count_nonzero(a, axis=None):
+def count_nonzero(a, axis=None, *, keepdims=False):
"""
Counts the number of non-zero values in the array ``a``.
@@ -425,6 +414,13 @@ def count_nonzero(a, axis=None):
.. versionadded:: 1.12.0
+ keepdims : bool, optional
+ If this is set to True, the axes that are counted are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the input array.
+
+ .. versionadded:: 1.19.0
+
Returns
-------
count : int or array of int
@@ -440,15 +436,19 @@ def count_nonzero(a, axis=None):
--------
>>> np.count_nonzero(np.eye(4))
4
- >>> np.count_nonzero([[0,1,7,0,0],[3,0,0,2,19]])
+ >>> a = np.array([[0, 1, 7, 0],
+ ... [3, 0, 2, 19]])
+ >>> np.count_nonzero(a)
5
- >>> np.count_nonzero([[0,1,7,0,0],[3,0,0,2,19]], axis=0)
- array([1, 1, 1, 1, 1])
- >>> np.count_nonzero([[0,1,7,0,0],[3,0,0,2,19]], axis=1)
+ >>> np.count_nonzero(a, axis=0)
+ array([1, 1, 2, 1])
+ >>> np.count_nonzero(a, axis=1)
array([2, 3])
-
+ >>> np.count_nonzero(a, axis=1, keepdims=True)
+ array([[2],
+ [3]])
"""
- if axis is None:
+ if axis is None and not keepdims:
return multiarray.count_nonzero(a)
a = asanyarray(a)
@@ -459,7 +459,7 @@ def count_nonzero(a, axis=None):
else:
a_bool = a.astype(np.bool_, copy=False)
- return a_bool.sum(axis=axis, dtype=np.intp)
+ return a_bool.sum(axis=axis, dtype=np.intp, keepdims=keepdims)
@set_module('numpy')
@@ -635,7 +635,7 @@ _mode_from_name_dict = {'v': 0,
def _mode_from_name(mode):
- if isinstance(mode, basestring):
+ if isinstance(mode, str):
return _mode_from_name_dict[mode.lower()[0]]
return mode
@@ -960,6 +960,9 @@ def tensordot(a, b, axes=2):
two sequences of the same length, with the first axis to sum over given
first in both sequences, the second axis second, and so forth.
+ The shape of the result consists of the non-contracted axes of the
+ first tensor, followed by the non-contracted axes of the second.
+
Examples
--------
A "traditional" example:
@@ -1720,7 +1723,7 @@ def indices(dimensions, dtype=int, sparse=False):
@set_module('numpy')
-def fromfunction(function, shape, **kwargs):
+def fromfunction(function, shape, *, dtype=float, **kwargs):
"""
Construct an array by executing a function over each coordinate.
@@ -1771,7 +1774,6 @@ def fromfunction(function, shape, **kwargs):
[2, 3, 4]])
"""
- dtype = kwargs.pop('dtype', float)
args = indices(shape, dtype=dtype)
return function(*args, **kwargs)
@@ -1781,19 +1783,19 @@ def _frombuffer(buf, dtype, shape, order):
@set_module('numpy')
-def isscalar(num):
+def isscalar(element):
"""
- Returns True if the type of `num` is a scalar type.
+ Returns True if the type of `element` is a scalar type.
Parameters
----------
- num : any
+ element : any
Input argument, can be of any type and shape.
Returns
-------
val : bool
- True if `num` is a scalar type, False if it is not.
+ True if `element` is a scalar type, False if it is not.
See Also
--------
@@ -1801,10 +1803,14 @@ def isscalar(num):
Notes
-----
- In almost all cases ``np.ndim(x) == 0`` should be used instead of this
- function, as that will also return true for 0d arrays. This is how
- numpy overloads functions in the style of the ``dx`` arguments to `gradient`
- and the ``bins`` argument to `histogram`. Some key differences:
+ If you need a stricter way to identify a *numerical* scalar, use
+ ``isinstance(x, numbers.Number)``, as that returns ``False`` for most
+ non-numerical elements such as strings.
+
+ In most cases ``np.ndim(x) == 0`` should be used instead of this function,
+ as that will also return true for 0d arrays. This is how numpy overloads
+ functions in the style of the ``dx`` arguments to `gradient` and the ``bins``
+ argument to `histogram`. Some key differences:
+--------------------------------------+---------------+-------------------+
| x |``isscalar(x)``|``np.ndim(x) == 0``|
@@ -1852,9 +1858,9 @@ def isscalar(num):
True
"""
- return (isinstance(num, generic)
- or type(num) in ScalarType
- or isinstance(num, numbers.Number))
+ return (isinstance(element, generic)
+ or type(element) in ScalarType
+ or isinstance(element, numbers.Number))
@set_module('numpy')
@@ -2091,9 +2097,9 @@ def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
`atol` are added together to compare against the absolute difference
between `a` and `b`.
- If either array contains one or more NaNs, False is returned.
- Infs are treated as equal if they are in the same place and of the same
- sign in both arrays.
+ NaNs are treated as equal if they are in the same place and if
+ ``equal_nan=True``. Infs are treated as equal if they are in the same
+ place and of the same sign in both arrays.
Parameters
----------
@@ -2105,7 +2111,7 @@ def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
The absolute tolerance parameter (see Notes).
equal_nan : bool
Whether to compare NaN's as equal. If True, NaN's in `a` will be
- considered equal to NaN's in `b`.
+        considered equal to NaN's in `b`.
.. versionadded:: 1.10.0
diff --git a/numpy/core/numerictypes.py b/numpy/core/numerictypes.py
index ab1ff65a4..aac741612 100644
--- a/numpy/core/numerictypes.py
+++ b/numpy/core/numerictypes.py
@@ -79,14 +79,10 @@ Exported symbols include:
\\-> object_ (not used much) (kind=O)
"""
-from __future__ import division, absolute_import, print_function
-
import types as _types
-import sys
import numbers
import warnings
-from numpy.compat import bytes, long
from numpy.core.multiarray import (
typeinfo, ndarray, array, empty, dtype, datetime_data,
datetime_as_string, busday_offset, busday_count, is_busday,
@@ -122,11 +118,8 @@ from ._dtype import _kind_name
# we don't export these for import *, but we do want them accessible
# as numerictypes.bool, etc.
-if sys.version_info[0] >= 3:
- from builtins import bool, int, float, complex, object, str
- unicode = str
-else:
- from __builtin__ import bool, int, float, complex, object, unicode, str
+from builtins import bool, int, float, complex, object, str, bytes
+from numpy.compat import long, unicode
# We use this later
@@ -319,9 +312,11 @@ def issubclass_(arg1, arg2):
Examples
--------
>>> np.issubclass_(np.int32, int)
- False # True on Python 2.7
+ False
>>> np.issubclass_(np.int32, float)
False
+ >>> np.issubclass_(np.float64, float)
+ True
"""
try:
@@ -392,35 +387,7 @@ def issubdtype(arg1, arg2):
if not issubclass_(arg1, generic):
arg1 = dtype(arg1).type
if not issubclass_(arg2, generic):
- arg2_orig = arg2
arg2 = dtype(arg2).type
- if not isinstance(arg2_orig, dtype):
- # weird deprecated behaviour, that tried to infer np.floating from
- # float, and similar less obvious things, such as np.generic from
- # basestring
- mro = arg2.mro()
- arg2 = mro[1] if len(mro) > 1 else mro[0]
-
- def type_repr(x):
- """ Helper to produce clear error messages """
- if not isinstance(x, type):
- return repr(x)
- elif issubclass(x, generic):
- return "np.{}".format(x.__name__)
- else:
- return x.__name__
-
- # 1.14, 2017-08-01
- warnings.warn(
- "Conversion of the second argument of issubdtype from `{raw}` "
- "to `{abstract}` is deprecated. In future, it will be treated "
- "as `{concrete} == np.dtype({raw}).type`.".format(
- raw=type_repr(arg2_orig),
- abstract=type_repr(arg2),
- concrete=type_repr(dtype(arg2_orig).type)
- ),
- FutureWarning, stacklevel=2
- )
return issubclass(arg1, arg2)
@@ -485,7 +452,7 @@ def sctype2char(sctype):
Examples
--------
- >>> for sctype in [np.int32, np.double, np.complex, np.string_, np.ndarray]:
+ >>> for sctype in [np.int32, np.double, np.complex_, np.string_, np.ndarray]:
... print(np.sctype2char(sctype))
l # may vary
d
diff --git a/numpy/core/records.py b/numpy/core/records.py
index a1439f9df..9c3530787 100644
--- a/numpy/core/records.py
+++ b/numpy/core/records.py
@@ -33,9 +33,6 @@ Record arrays allow us to access fields as properties::
array([2., 2.])
"""
-from __future__ import division, absolute_import, print_function
-
-import sys
import os
import warnings
from collections import Counter, OrderedDict
@@ -43,7 +40,7 @@ from collections import Counter, OrderedDict
from . import numeric as sb
from . import numerictypes as nt
from numpy.compat import (
- isfileobj, bytes, long, unicode, os_fspath, contextlib_nullcontext
+ isfileobj, os_fspath, contextlib_nullcontext
)
from numpy.core.overrides import set_module
from .arrayprint import get_printoptions
@@ -98,7 +95,7 @@ def find_duplicate(list):
@set_module('numpy')
-class format_parser(object):
+class format_parser:
"""
Class to convert formats, names, titles description to a dtype.
@@ -160,8 +157,7 @@ class format_parser(object):
def __init__(self, formats, names, titles, aligned=False, byteorder=None):
self._parseFormats(formats, aligned)
self._setfieldnames(names, titles)
- self._createdescr(byteorder)
- self.dtype = self._descr
+ self._createdtype(byteorder)
def _parseFormats(self, formats, aligned=False):
""" Parse the field formats """
@@ -188,10 +184,10 @@ class format_parser(object):
"""convert input field names into a list and assign to the _names
attribute """
- if (names):
- if (type(names) in [list, tuple]):
+ if names:
+ if type(names) in [list, tuple]:
pass
- elif isinstance(names, (str, unicode)):
+ elif isinstance(names, str):
names = names.split(',')
else:
raise NameError("illegal input names %s" % repr(names))
@@ -211,25 +207,28 @@ class format_parser(object):
if _dup:
raise ValueError("Duplicate field names: %s" % _dup)
- if (titles):
+ if titles:
self._titles = [n.strip() for n in titles[:self._nfields]]
else:
self._titles = []
titles = []
- if (self._nfields > len(titles)):
+ if self._nfields > len(titles):
self._titles += [None] * (self._nfields - len(titles))
- def _createdescr(self, byteorder):
- descr = sb.dtype({'names':self._names,
- 'formats':self._f_formats,
- 'offsets':self._offsets,
- 'titles':self._titles})
- if (byteorder is not None):
+ def _createdtype(self, byteorder):
+ dtype = sb.dtype({
+ 'names': self._names,
+ 'formats': self._f_formats,
+ 'offsets': self._offsets,
+ 'titles': self._titles,
+ })
+ if byteorder is not None:
byteorder = _byteorderconv[byteorder[0]]
- descr = descr.newbyteorder(byteorder)
+ dtype = dtype.newbyteorder(byteorder)
+
+ self.dtype = dtype
- self._descr = descr
class record(nt.void):
"""A data-type scalar that allows field access as attribute lookup.
@@ -251,7 +250,7 @@ class record(nt.void):
return super(record, self).__str__()
def __getattribute__(self, attr):
- if attr in ['setfield', 'getfield', 'dtype']:
+ if attr in ('setfield', 'getfield', 'dtype'):
return nt.void.__getattribute__(self, attr)
try:
return nt.void.__getattribute__(self, attr)
@@ -276,7 +275,7 @@ class record(nt.void):
"attribute '%s'" % attr)
def __setattr__(self, attr, val):
- if attr in ['setfield', 'getfield', 'dtype']:
+ if attr in ('setfield', 'getfield', 'dtype'):
raise AttributeError("Cannot set '%s' attribute" % attr)
fielddict = nt.void.__getattribute__(self, 'dtype').fields
res = fielddict.get(attr, None)
@@ -433,7 +432,7 @@ class recarray(ndarray):
if dtype is not None:
descr = sb.dtype(dtype)
else:
- descr = format_parser(formats, names, titles, aligned, byteorder)._descr
+ descr = format_parser(formats, names, titles, aligned, byteorder).dtype
if buf is None:
self = ndarray.__new__(subtype, shape, (record, descr), order=order)
@@ -496,8 +495,7 @@ class recarray(ndarray):
except Exception:
fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
if attr not in fielddict:
- exctype, value = sys.exc_info()[:2]
- raise exctype(value)
+ raise
else:
fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
if attr not in fielddict:
@@ -535,8 +533,7 @@ class recarray(ndarray):
def __repr__(self):
repr_dtype = self.dtype
- if (self.dtype.type is record
- or (not issubclass(self.dtype.type, nt.void))):
+ if self.dtype.type is record or not issubclass(self.dtype.type, nt.void):
# If this is a full record array (has numpy.record dtype),
# or if it has a scalar (non-void) dtype with no records,
# represent it using the rec.array function. Since rec.array
@@ -584,6 +581,18 @@ class recarray(ndarray):
return self.setfield(val, *res)
+def _deprecate_shape_0_as_None(shape):
+ if shape == 0:
+ warnings.warn(
+ "Passing `shape=0` to have the shape be inferred is deprecated, "
+ "and in future will be equivalent to `shape=(0,)`. To infer "
+ "the shape and suppress this warning, pass `shape=None` instead.",
+ FutureWarning, stacklevel=3)
+ return None
+ else:
+ return shape
+
+
def fromarrays(arrayList, dtype=None, shape=None, formats=None,
names=None, titles=None, aligned=False, byteorder=None):
""" create a record array from a (flat) list of arrays
@@ -601,26 +610,24 @@ def fromarrays(arrayList, dtype=None, shape=None, formats=None,
arrayList = [sb.asarray(x) for x in arrayList]
- if shape is None or shape == 0:
- shape = arrayList[0].shape
+ # NumPy 1.19.0, 2020-01-01
+ shape = _deprecate_shape_0_as_None(shape)
- if isinstance(shape, int):
+ if shape is None:
+ shape = arrayList[0].shape
+ elif isinstance(shape, int):
shape = (shape,)
if formats is None and dtype is None:
# go through each object in the list to see if it is an ndarray
# and determine the formats.
- formats = []
- for obj in arrayList:
- formats.append(obj.dtype)
+ formats = [obj.dtype for obj in arrayList]
if dtype is not None:
descr = sb.dtype(dtype)
- _names = descr.names
else:
- parsed = format_parser(formats, names, titles, aligned, byteorder)
- _names = parsed._names
- descr = parsed._descr
+ descr = format_parser(formats, names, titles, aligned, byteorder).dtype
+ _names = descr.names
# Determine shape from data-type.
if len(descr) != len(arrayList):
@@ -685,14 +692,16 @@ def fromrecords(recList, dtype=None, shape=None, formats=None, names=None,
if dtype is not None:
descr = sb.dtype((record, dtype))
else:
- descr = format_parser(formats, names, titles, aligned, byteorder)._descr
+ descr = format_parser(formats, names, titles, aligned, byteorder).dtype
try:
retval = sb.array(recList, dtype=descr)
except (TypeError, ValueError):
- if (shape is None or shape == 0):
+ # NumPy 1.19.0, 2020-01-01
+ shape = _deprecate_shape_0_as_None(shape)
+ if shape is None:
shape = len(recList)
- if isinstance(shape, (int, long)):
+ if isinstance(shape, int):
shape = (shape,)
if len(shape) > 1:
raise ValueError("Can only deal with 1-d array.")
@@ -726,10 +735,14 @@ def fromstring(datastring, dtype=None, shape=None, offset=0, formats=None,
if dtype is not None:
descr = sb.dtype(dtype)
else:
- descr = format_parser(formats, names, titles, aligned, byteorder)._descr
+ descr = format_parser(formats, names, titles, aligned, byteorder).dtype
itemsize = descr.itemsize
- if (shape is None or shape == 0 or shape == -1):
+
+ # NumPy 1.19.0, 2020-01-01
+ shape = _deprecate_shape_0_as_None(shape)
+
+ if shape in (None, -1):
shape = (len(datastring) - offset) // itemsize
_array = recarray(shape, descr, buf=datastring, offset=offset)
@@ -772,9 +785,12 @@ def fromfile(fd, dtype=None, shape=None, offset=0, formats=None,
if dtype is None and formats is None:
raise TypeError("fromfile() needs a 'dtype' or 'formats' argument")
- if (shape is None or shape == 0):
+ # NumPy 1.19.0, 2020-01-01
+ shape = _deprecate_shape_0_as_None(shape)
+
+ if shape is None:
shape = (-1,)
- elif isinstance(shape, (int, long)):
+ elif isinstance(shape, int):
shape = (shape,)
if isfileobj(fd):
@@ -785,14 +801,14 @@ def fromfile(fd, dtype=None, shape=None, offset=0, formats=None,
ctx = open(os_fspath(fd), 'rb')
with ctx as fd:
- if (offset > 0):
+ if offset > 0:
fd.seek(offset, 1)
size = get_remaining_size(fd)
if dtype is not None:
descr = sb.dtype(dtype)
else:
- descr = format_parser(formats, names, titles, aligned, byteorder)._descr
+ descr = format_parser(formats, names, titles, aligned, byteorder).dtype
itemsize = descr.itemsize
@@ -824,7 +840,7 @@ def array(obj, dtype=None, shape=None, offset=0, strides=None, formats=None,
"""
if ((isinstance(obj, (type(None), str)) or isfileobj(obj)) and
- (formats is None) and (dtype is None)):
+ formats is None and dtype is None):
raise ValueError("Must define formats (or dtype) if object is "
"None, string, or an open file")
@@ -833,7 +849,7 @@ def array(obj, dtype=None, shape=None, offset=0, strides=None, formats=None,
dtype = sb.dtype(dtype)
elif formats is not None:
dtype = format_parser(formats, names, titles,
- aligned, byteorder)._descr
+ aligned, byteorder).dtype
else:
kwds = {'formats': formats,
'names': names,
diff --git a/numpy/core/setup.py b/numpy/core/setup.py
index 5f2f4a7b2..15e732614 100644
--- a/numpy/core/setup.py
+++ b/numpy/core/setup.py
@@ -1,5 +1,3 @@
-from __future__ import division, print_function
-
import os
import sys
import pickle
@@ -16,7 +14,7 @@ from numpy._build_utils.apple_accelerate import (
uses_accelerate_framework, get_sgemv_fix
)
from numpy.compat import npy_load_module
-from setup_common import *
+from setup_common import * # noqa: F403
# Set to True to enable relaxed strides checking. This (mostly) means
# that `strides[dim]` is ignored if `shape[dim] == 1` when setting flags.
@@ -38,7 +36,7 @@ NPY_RELAXED_STRIDES_DEBUG = NPY_RELAXED_STRIDES_DEBUG and NPY_RELAXED_STRIDES_CH
# Use pickle in all cases, as cPickle is gone in python3 and the difference
# in time is only in build. -- Charles Harris, 2013-03-30
-class CallOnceOnly(object):
+class CallOnceOnly:
def __init__(self):
self._check_types = None
self._check_ieee_macros = None
@@ -469,10 +467,6 @@ def configuration(parent_package='',top_path=None):
moredefs.append('NPY_DO_NOT_OPTIMIZE_LONGLONG_right_shift')
moredefs.append('NPY_DO_NOT_OPTIMIZE_ULONGLONG_right_shift')
- # Py3K check
- if sys.version_info[0] >= 3:
- moredefs.append(('NPY_PY3K', 1))
-
# Generate the config.h file from moredefs
with open(target, 'w') as target_f:
for d in moredefs:
@@ -655,6 +649,9 @@ def configuration(parent_package='',top_path=None):
# compiler does not work).
st = config_cmd.try_link('int main(void) { return 0;}')
if not st:
+ # rerun the failing command in verbose mode
+ config_cmd.compiler.verbose = True
+ config_cmd.try_link('int main(void) { return 0;}')
raise RuntimeError("Broken toolchain: cannot link a simple C program")
mlibs = check_mathlib(config_cmd)
@@ -748,10 +745,17 @@ def configuration(parent_package='',top_path=None):
join('src', 'common', 'ucsnarrow.c'),
join('src', 'common', 'ufunc_override.c'),
join('src', 'common', 'numpyos.c'),
+ join('src', 'common', 'npy_cpu_features.c.src'),
]
- blas_info = get_info('blas_opt', 0)
- if blas_info and ('HAVE_CBLAS', None) in blas_info.get('define_macros', []):
+ if os.environ.get('NPY_USE_BLAS_ILP64', "0") != "0":
+ blas_info = get_info('blas_ilp64_opt', 2)
+ else:
+ blas_info = get_info('blas_opt', 0)
+
+ have_blas = blas_info and ('HAVE_CBLAS', None) in blas_info.get('define_macros', [])
+
+ if have_blas:
extra_info = blas_info
# These files are also in MANIFEST.in so that they are always in
# the source distribution independently of HAVE_CBLAS.
@@ -771,7 +775,7 @@ def configuration(parent_package='',top_path=None):
join('src', 'multiarray', 'arrayobject.h'),
join('src', 'multiarray', 'arraytypes.h'),
join('src', 'multiarray', 'arrayfunction_override.h'),
- join('src', 'multiarray', 'buffer.h'),
+ join('src', 'multiarray', 'npy_buffer.h'),
join('src', 'multiarray', 'calculation.h'),
join('src', 'multiarray', 'common.h'),
join('src', 'multiarray', 'convert_datatype.h'),
@@ -895,7 +899,6 @@ def configuration(parent_package='',top_path=None):
join('src', 'umath', 'clip.c.src'),
join('src', 'umath', 'ufunc_object.c'),
join('src', 'umath', 'extobj.c'),
- join('src', 'umath', 'cpuid.c'),
join('src', 'umath', 'scalarmath.c.src'),
join('src', 'umath', 'ufunc_type_resolution.c'),
join('src', 'umath', 'override.c'),
diff --git a/numpy/core/setup_common.py b/numpy/core/setup_common.py
index 84b78b585..63c4a76a9 100644
--- a/numpy/core/setup_common.py
+++ b/numpy/core/setup_common.py
@@ -1,10 +1,7 @@
-from __future__ import division, absolute_import, print_function
-
# Code common to build tools
import sys
import warnings
import copy
-import binascii
import textwrap
from numpy.distutils.misc_util import mingw32
@@ -135,11 +132,6 @@ OPTIONAL_INTRINSICS = [("__builtin_isnan", '5.'),
("__builtin_bswap64", '5u'),
("__builtin_expect", '5, 0'),
("__builtin_mul_overflow", '5, 5, (int*)5'),
- # broken on OSX 10.11, make sure its not optimized away
- ("volatile int r = __builtin_cpu_supports", '"sse"',
- "stdio.h", "__BUILTIN_CPU_SUPPORTS"),
- ("volatile int r = __builtin_cpu_supports", '"avx512f"',
- "stdio.h", "__BUILTIN_CPU_SUPPORTS_AVX512F"),
# MMX only needed for icc, but some clangs don't have it
("_m_from_int64", '0', "emmintrin.h"),
("_mm_load_ps", '(float*)0', "xmmintrin.h"), # SSE
@@ -246,9 +238,9 @@ def check_long_double_representation(cmd):
# Disable multi-file interprocedural optimization in the Intel compiler on Linux
# which generates intermediary object files and prevents checking the
# float representation.
- elif (sys.platform != "win32"
- and cmd.compiler.compiler_type.startswith('intel')
- and '-ipo' in cmd.compiler.cc_exe):
+ elif (sys.platform != "win32"
+ and cmd.compiler.compiler_type.startswith('intel')
+ and '-ipo' in cmd.compiler.cc_exe):
newcompiler = cmd.compiler.cc_exe.replace(' -ipo', '')
cmd.compiler.set_executables(
compiler=newcompiler,
@@ -266,8 +258,9 @@ def check_long_double_representation(cmd):
except ValueError:
# try linking to support CC="gcc -flto" or icc -ipo
# struct needs to be volatile so it isn't optimized away
+ # additionally "clang -flto" requires the foo struct to be used
body = body.replace('struct', 'volatile struct')
- body += "int main(void) { return 0; }\n"
+ body += "int main(void) { return foo.before[0]; }\n"
src, obj = cmd._compile(body, None, None, 'c')
cmd.temp_files.append("_configtest")
cmd.compiler.link_executable([obj], "_configtest")
@@ -312,32 +305,15 @@ def pyod(filename):
We only implement enough to get the necessary information for long double
representation, this is not intended as a compatible replacement for od.
"""
- def _pyod2():
- out = []
-
- with open(filename, 'rb') as fid:
- yo = [int(oct(int(binascii.b2a_hex(o), 16))) for o in fid.read()]
- for i in range(0, len(yo), 16):
- line = ['%07d' % int(oct(i))]
- line.extend(['%03d' % c for c in yo[i:i+16]])
- out.append(" ".join(line))
- return out
-
- def _pyod3():
- out = []
-
- with open(filename, 'rb') as fid:
- yo2 = [oct(o)[2:] for o in fid.read()]
- for i in range(0, len(yo2), 16):
- line = ['%07d' % int(oct(i)[2:])]
- line.extend(['%03d' % int(c) for c in yo2[i:i+16]])
- out.append(" ".join(line))
- return out
-
- if sys.version_info[0] < 3:
- return _pyod2()
- else:
- return _pyod3()
+ out = []
+ with open(filename, 'rb') as fid:
+ yo2 = [oct(o)[2:] for o in fid.read()]
+ for i in range(0, len(yo2), 16):
+ line = ['%07d' % int(oct(i)[2:])]
+ line.extend(['%03d' % int(c) for c in yo2[i:i+16]])
+ out.append(" ".join(line))
+ return out
+
_BEFORE_SEQ = ['000', '000', '000', '000', '000', '000', '000', '000',
'001', '043', '105', '147', '211', '253', '315', '357']
diff --git a/numpy/core/shape_base.py b/numpy/core/shape_base.py
index d7e769e62..ee56dbe43 100644
--- a/numpy/core/shape_base.py
+++ b/numpy/core/shape_base.py
@@ -1,9 +1,8 @@
-from __future__ import division, absolute_import, print_function
-
__all__ = ['atleast_1d', 'atleast_2d', 'atleast_3d', 'block', 'hstack',
'stack', 'vstack']
import functools
+import itertools
import operator
import warnings
@@ -472,7 +471,7 @@ def _block_check_depths_match(arrays, parent_index=[]):
first_index : list of int
The full index of an element from the bottom of the nesting in
`arrays`. If any element at the bottom is an empty list, this will
- refer to it, and the last index along the empty axis will be `None`.
+ refer to it, and the last index along the empty axis will be None.
max_arr_ndim : int
The maximum of the ndims of the arrays nested in `arrays`.
final_size: int
@@ -532,14 +531,7 @@ def _atleast_nd(a, ndim):
def _accumulate(values):
- # Helper function because Python 2.7 doesn't have
- # itertools.accumulate
- value = 0
- accumulated = []
- for v in values:
- value += v
- accumulated.append(value)
- return accumulated
+ return list(itertools.accumulate(values))
def _concatenate_shapes(shapes, axis):
@@ -575,7 +567,7 @@ def _concatenate_shapes(shapes, axis):
that was computed deeper in the recursion.
These are returned as tuples to ensure that they can quickly be added
- to existing slice tuple without creating a new tuple everytime.
+ to existing slice tuple without creating a new tuple every time.
"""
# Cache a result that will be reused.
@@ -678,8 +670,7 @@ def _block_dispatcher(arrays):
# tuple. Also, we know that list.__array_function__ will never exist.
if type(arrays) is list:
for subarrays in arrays:
- for subarray in _block_dispatcher(subarrays):
- yield subarray
+ yield from _block_dispatcher(subarrays)
else:
yield arrays
diff --git a/numpy/core/src/common/array_assign.c b/numpy/core/src/common/array_assign.c
index 0ac1b01c6..d626d1260 100644
--- a/numpy/core/src/common/array_assign.c
+++ b/numpy/core/src/common/array_assign.c
@@ -27,9 +27,9 @@
/* See array_assign.h for parameter documentation */
NPY_NO_EXPORT int
-broadcast_strides(int ndim, npy_intp *shape,
- int strides_ndim, npy_intp *strides_shape, npy_intp *strides,
- char *strides_name,
+broadcast_strides(int ndim, npy_intp const *shape,
+ int strides_ndim, npy_intp const *strides_shape, npy_intp const *strides,
+ char const *strides_name,
npy_intp *out_strides)
{
int idim, idim_start = ndim - strides_ndim;
@@ -84,8 +84,8 @@ broadcast_error: {
/* See array_assign.h for parameter documentation */
NPY_NO_EXPORT int
-raw_array_is_aligned(int ndim, npy_intp *shape,
- char *data, npy_intp *strides, int alignment)
+raw_array_is_aligned(int ndim, npy_intp const *shape,
+ char *data, npy_intp const *strides, int alignment)
{
/*
diff --git a/numpy/core/src/common/array_assign.h b/numpy/core/src/common/array_assign.h
index 69ef56bb4..f5d884dd9 100644
--- a/numpy/core/src/common/array_assign.h
+++ b/numpy/core/src/common/array_assign.h
@@ -44,8 +44,8 @@ PyArray_AssignRawScalar(PyArrayObject *dst,
* Returns 0 on success, -1 on failure.
*/
NPY_NO_EXPORT int
-raw_array_assign_scalar(int ndim, npy_intp *shape,
- PyArray_Descr *dst_dtype, char *dst_data, npy_intp *dst_strides,
+raw_array_assign_scalar(int ndim, npy_intp const *shape,
+ PyArray_Descr *dst_dtype, char *dst_data, npy_intp const *dst_strides,
PyArray_Descr *src_dtype, char *src_data);
/*
@@ -55,11 +55,11 @@ raw_array_assign_scalar(int ndim, npy_intp *shape,
* Returns 0 on success, -1 on failure.
*/
NPY_NO_EXPORT int
-raw_array_wheremasked_assign_scalar(int ndim, npy_intp *shape,
- PyArray_Descr *dst_dtype, char *dst_data, npy_intp *dst_strides,
+raw_array_wheremasked_assign_scalar(int ndim, npy_intp const *shape,
+ PyArray_Descr *dst_dtype, char *dst_data, npy_intp const *dst_strides,
PyArray_Descr *src_dtype, char *src_data,
PyArray_Descr *wheremask_dtype, char *wheremask_data,
- npy_intp *wheremask_strides);
+ npy_intp const *wheremask_strides);
/******** LOW-LEVEL ARRAY MANIPULATION HELPERS ********/
@@ -80,9 +80,9 @@ raw_array_wheremasked_assign_scalar(int ndim, npy_intp *shape,
* Returns 0 on success, -1 on failure.
*/
NPY_NO_EXPORT int
-broadcast_strides(int ndim, npy_intp *shape,
- int strides_ndim, npy_intp *strides_shape, npy_intp *strides,
- char *strides_name,
+broadcast_strides(int ndim, npy_intp const *shape,
+ int strides_ndim, npy_intp const *strides_shape, npy_intp const *strides,
+ char const *strides_name,
npy_intp *out_strides);
/*
@@ -93,8 +93,8 @@ broadcast_strides(int ndim, npy_intp *shape,
* cannot-be-aligned, in which case 0 (false) is always returned.
*/
NPY_NO_EXPORT int
-raw_array_is_aligned(int ndim, npy_intp *shape,
- char *data, npy_intp *strides, int alignment);
+raw_array_is_aligned(int ndim, npy_intp const *shape,
+ char *data, npy_intp const *strides, int alignment);
/*
* Checks if an array is aligned to its "true alignment"
diff --git a/numpy/core/src/common/binop_override.h b/numpy/core/src/common/binop_override.h
index 47df63e38..c5e7ab808 100644
--- a/numpy/core/src/common/binop_override.h
+++ b/numpy/core/src/common/binop_override.h
@@ -129,11 +129,14 @@ binop_should_defer(PyObject *self, PyObject *other, int inplace)
* check whether __array_ufunc__ equals None.
*/
attr = PyArray_LookupSpecial(other, "__array_ufunc__");
- if (attr) {
+ if (attr != NULL) {
defer = !inplace && (attr == Py_None);
Py_DECREF(attr);
return defer;
}
+ else if (PyErr_Occurred()) {
+ PyErr_Clear(); /* TODO[gh-14801]: propagate crashes during attribute access? */
+ }
/*
* Otherwise, we need to check for the legacy __array_priority__. But if
* other.__class__ is a subtype of self.__class__, then it's already had
diff --git a/numpy/core/src/common/cblasfuncs.c b/numpy/core/src/common/cblasfuncs.c
index 39572fed4..e78587de0 100644
--- a/numpy/core/src/common/cblasfuncs.c
+++ b/numpy/core/src/common/cblasfuncs.c
@@ -24,28 +24,28 @@ static const float oneF[2] = {1.0, 0.0}, zeroF[2] = {0.0, 0.0};
static void
gemm(int typenum, enum CBLAS_ORDER order,
enum CBLAS_TRANSPOSE transA, enum CBLAS_TRANSPOSE transB,
- int m, int n, int k,
- PyArrayObject *A, int lda, PyArrayObject *B, int ldb, PyArrayObject *R)
+ npy_intp m, npy_intp n, npy_intp k,
+ PyArrayObject *A, npy_intp lda, PyArrayObject *B, npy_intp ldb, PyArrayObject *R)
{
const void *Adata = PyArray_DATA(A), *Bdata = PyArray_DATA(B);
void *Rdata = PyArray_DATA(R);
- int ldc = PyArray_DIM(R, 1) > 1 ? PyArray_DIM(R, 1) : 1;
+ npy_intp ldc = PyArray_DIM(R, 1) > 1 ? PyArray_DIM(R, 1) : 1;
switch (typenum) {
case NPY_DOUBLE:
- cblas_dgemm(order, transA, transB, m, n, k, 1.,
+ CBLAS_FUNC(cblas_dgemm)(order, transA, transB, m, n, k, 1.,
Adata, lda, Bdata, ldb, 0., Rdata, ldc);
break;
case NPY_FLOAT:
- cblas_sgemm(order, transA, transB, m, n, k, 1.f,
+ CBLAS_FUNC(cblas_sgemm)(order, transA, transB, m, n, k, 1.f,
Adata, lda, Bdata, ldb, 0.f, Rdata, ldc);
break;
case NPY_CDOUBLE:
- cblas_zgemm(order, transA, transB, m, n, k, oneD,
+ CBLAS_FUNC(cblas_zgemm)(order, transA, transB, m, n, k, oneD,
Adata, lda, Bdata, ldb, zeroD, Rdata, ldc);
break;
case NPY_CFLOAT:
- cblas_cgemm(order, transA, transB, m, n, k, oneF,
+ CBLAS_FUNC(cblas_cgemm)(order, transA, transB, m, n, k, oneF,
Adata, lda, Bdata, ldb, zeroF, Rdata, ldc);
break;
}
@@ -57,29 +57,29 @@ gemm(int typenum, enum CBLAS_ORDER order,
*/
static void
gemv(int typenum, enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE trans,
- PyArrayObject *A, int lda, PyArrayObject *X, int incX,
+ PyArrayObject *A, npy_intp lda, PyArrayObject *X, npy_intp incX,
PyArrayObject *R)
{
const void *Adata = PyArray_DATA(A), *Xdata = PyArray_DATA(X);
void *Rdata = PyArray_DATA(R);
- int m = PyArray_DIM(A, 0), n = PyArray_DIM(A, 1);
+ npy_intp m = PyArray_DIM(A, 0), n = PyArray_DIM(A, 1);
switch (typenum) {
case NPY_DOUBLE:
- cblas_dgemv(order, trans, m, n, 1., Adata, lda, Xdata, incX,
+ CBLAS_FUNC(cblas_dgemv)(order, trans, m, n, 1., Adata, lda, Xdata, incX,
0., Rdata, 1);
break;
case NPY_FLOAT:
- cblas_sgemv(order, trans, m, n, 1.f, Adata, lda, Xdata, incX,
+ CBLAS_FUNC(cblas_sgemv)(order, trans, m, n, 1.f, Adata, lda, Xdata, incX,
0.f, Rdata, 1);
break;
case NPY_CDOUBLE:
- cblas_zgemv(order, trans, m, n, oneD, Adata, lda, Xdata, incX,
+ CBLAS_FUNC(cblas_zgemv)(order, trans, m, n, oneD, Adata, lda, Xdata, incX,
zeroD, Rdata, 1);
break;
case NPY_CFLOAT:
- cblas_cgemv(order, trans, m, n, oneF, Adata, lda, Xdata, incX,
+ CBLAS_FUNC(cblas_cgemv)(order, trans, m, n, oneF, Adata, lda, Xdata, incX,
zeroF, Rdata, 1);
break;
}
@@ -91,19 +91,19 @@ gemv(int typenum, enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE trans,
*/
static void
syrk(int typenum, enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE trans,
- int n, int k,
- PyArrayObject *A, int lda, PyArrayObject *R)
+ npy_intp n, npy_intp k,
+ PyArrayObject *A, npy_intp lda, PyArrayObject *R)
{
const void *Adata = PyArray_DATA(A);
void *Rdata = PyArray_DATA(R);
- int ldc = PyArray_DIM(R, 1) > 1 ? PyArray_DIM(R, 1) : 1;
+ npy_intp ldc = PyArray_DIM(R, 1) > 1 ? PyArray_DIM(R, 1) : 1;
npy_intp i;
npy_intp j;
switch (typenum) {
case NPY_DOUBLE:
- cblas_dsyrk(order, CblasUpper, trans, n, k, 1.,
+ CBLAS_FUNC(cblas_dsyrk)(order, CblasUpper, trans, n, k, 1.,
Adata, lda, 0., Rdata, ldc);
for (i = 0; i < n; i++) {
@@ -114,7 +114,7 @@ syrk(int typenum, enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE trans,
}
break;
case NPY_FLOAT:
- cblas_ssyrk(order, CblasUpper, trans, n, k, 1.f,
+ CBLAS_FUNC(cblas_ssyrk)(order, CblasUpper, trans, n, k, 1.f,
Adata, lda, 0.f, Rdata, ldc);
for (i = 0; i < n; i++) {
@@ -125,7 +125,7 @@ syrk(int typenum, enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE trans,
}
break;
case NPY_CDOUBLE:
- cblas_zsyrk(order, CblasUpper, trans, n, k, oneD,
+ CBLAS_FUNC(cblas_zsyrk)(order, CblasUpper, trans, n, k, oneD,
Adata, lda, zeroD, Rdata, ldc);
for (i = 0; i < n; i++) {
@@ -136,7 +136,7 @@ syrk(int typenum, enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE trans,
}
break;
case NPY_CFLOAT:
- cblas_csyrk(order, CblasUpper, trans, n, k, oneF,
+ CBLAS_FUNC(cblas_csyrk)(order, CblasUpper, trans, n, k, oneF,
Adata, lda, zeroF, Rdata, ldc);
for (i = 0; i < n; i++) {
@@ -222,7 +222,7 @@ cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2,
PyArrayObject *out)
{
PyArrayObject *result = NULL, *out_buf = NULL;
- int j, lda, ldb;
+ npy_intp j, lda, ldb;
npy_intp l;
int nd;
npy_intp ap1stride = 0;
@@ -385,14 +385,15 @@ cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2,
*((double *)PyArray_DATA(ap1));
}
else if (ap1shape != _matrix) {
- cblas_daxpy(l,
+ CBLAS_FUNC(cblas_daxpy)(l,
*((double *)PyArray_DATA(ap2)),
(double *)PyArray_DATA(ap1),
ap1stride/sizeof(double),
(double *)PyArray_DATA(out_buf), 1);
}
else {
- int maxind, oind, i, a1s, outs;
+ int maxind, oind;
+ npy_intp i, a1s, outs;
char *ptr, *optr;
double val;
@@ -405,7 +406,7 @@ cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2,
a1s = PyArray_STRIDE(ap1, maxind) / sizeof(double);
outs = PyArray_STRIDE(out_buf, maxind) / sizeof(double);
for (i = 0; i < PyArray_DIM(ap1, oind); i++) {
- cblas_daxpy(l, val, (double *)ptr, a1s,
+ CBLAS_FUNC(cblas_daxpy)(l, val, (double *)ptr, a1s,
(double *)optr, outs);
ptr += PyArray_STRIDE(ap1, oind);
optr += PyArray_STRIDE(out_buf, oind);
@@ -423,14 +424,15 @@ cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2,
res->imag = ptr1->real * ptr2->imag + ptr1->imag * ptr2->real;
}
else if (ap1shape != _matrix) {
- cblas_zaxpy(l,
+ CBLAS_FUNC(cblas_zaxpy)(l,
(double *)PyArray_DATA(ap2),
(double *)PyArray_DATA(ap1),
ap1stride/sizeof(npy_cdouble),
(double *)PyArray_DATA(out_buf), 1);
}
else {
- int maxind, oind, i, a1s, outs;
+ int maxind, oind;
+ npy_intp i, a1s, outs;
char *ptr, *optr;
double *pval;
@@ -443,7 +445,7 @@ cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2,
a1s = PyArray_STRIDE(ap1, maxind) / sizeof(npy_cdouble);
outs = PyArray_STRIDE(out_buf, maxind) / sizeof(npy_cdouble);
for (i = 0; i < PyArray_DIM(ap1, oind); i++) {
- cblas_zaxpy(l, pval, (double *)ptr, a1s,
+ CBLAS_FUNC(cblas_zaxpy)(l, pval, (double *)ptr, a1s,
(double *)optr, outs);
ptr += PyArray_STRIDE(ap1, oind);
optr += PyArray_STRIDE(out_buf, oind);
@@ -456,14 +458,15 @@ cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2,
*((float *)PyArray_DATA(ap1));
}
else if (ap1shape != _matrix) {
- cblas_saxpy(l,
+ CBLAS_FUNC(cblas_saxpy)(l,
*((float *)PyArray_DATA(ap2)),
(float *)PyArray_DATA(ap1),
ap1stride/sizeof(float),
(float *)PyArray_DATA(out_buf), 1);
}
else {
- int maxind, oind, i, a1s, outs;
+ int maxind, oind;
+ npy_intp i, a1s, outs;
char *ptr, *optr;
float val;
@@ -476,7 +479,7 @@ cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2,
a1s = PyArray_STRIDE(ap1, maxind) / sizeof(float);
outs = PyArray_STRIDE(out_buf, maxind) / sizeof(float);
for (i = 0; i < PyArray_DIM(ap1, oind); i++) {
- cblas_saxpy(l, val, (float *)ptr, a1s,
+ CBLAS_FUNC(cblas_saxpy)(l, val, (float *)ptr, a1s,
(float *)optr, outs);
ptr += PyArray_STRIDE(ap1, oind);
optr += PyArray_STRIDE(out_buf, oind);
@@ -494,14 +497,15 @@ cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2,
res->imag = ptr1->real * ptr2->imag + ptr1->imag * ptr2->real;
}
else if (ap1shape != _matrix) {
- cblas_caxpy(l,
+ CBLAS_FUNC(cblas_caxpy)(l,
(float *)PyArray_DATA(ap2),
(float *)PyArray_DATA(ap1),
ap1stride/sizeof(npy_cfloat),
(float *)PyArray_DATA(out_buf), 1);
}
else {
- int maxind, oind, i, a1s, outs;
+ int maxind, oind;
+ npy_intp i, a1s, outs;
char *ptr, *optr;
float *pval;
@@ -514,7 +518,7 @@ cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2,
a1s = PyArray_STRIDE(ap1, maxind) / sizeof(npy_cfloat);
outs = PyArray_STRIDE(out_buf, maxind) / sizeof(npy_cfloat);
for (i = 0; i < PyArray_DIM(ap1, oind); i++) {
- cblas_caxpy(l, pval, (float *)ptr, a1s,
+ CBLAS_FUNC(cblas_caxpy)(l, pval, (float *)ptr, a1s,
(float *)optr, outs);
ptr += PyArray_STRIDE(ap1, oind);
optr += PyArray_STRIDE(out_buf, oind);
@@ -537,7 +541,7 @@ cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2,
/* Matrix vector multiplication -- Level 2 BLAS */
/* lda must be MAX(M,1) */
enum CBLAS_ORDER Order;
- int ap2s;
+ npy_intp ap2s;
if (!PyArray_ISONESEGMENT(ap1)) {
PyObject *new;
@@ -564,7 +568,7 @@ cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2,
else if (ap1shape != _matrix && ap2shape == _matrix) {
/* Vector matrix multiplication -- Level 2 BLAS */
enum CBLAS_ORDER Order;
- int ap1s;
+ npy_intp ap1s;
if (!PyArray_ISONESEGMENT(ap2)) {
PyObject *new;
@@ -601,7 +605,7 @@ cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2,
*/
enum CBLAS_ORDER Order;
enum CBLAS_TRANSPOSE Trans1, Trans2;
- int M, N, L;
+ npy_intp M, N, L;
/* Optimization possible: */
/*
diff --git a/numpy/core/src/common/get_attr_string.h b/numpy/core/src/common/get_attr_string.h
index d458d9550..8b7cf1c5b 100644
--- a/numpy/core/src/common/get_attr_string.h
+++ b/numpy/core/src/common/get_attr_string.h
@@ -7,9 +7,6 @@ _is_basic_python_type(PyTypeObject *tp)
return (
/* Basic number types */
tp == &PyBool_Type ||
-#if !defined(NPY_PY3K)
- tp == &PyInt_Type ||
-#endif
tp == &PyLong_Type ||
tp == &PyFloat_Type ||
tp == &PyComplex_Type ||
@@ -22,9 +19,6 @@ _is_basic_python_type(PyTypeObject *tp)
tp == &PyFrozenSet_Type ||
tp == &PyUnicode_Type ||
tp == &PyBytes_Type ||
-#if !defined(NPY_PY3K)
- tp == &PyString_Type ||
-#endif
/* other builtins */
tp == &PySlice_Type ||
@@ -40,45 +34,37 @@ _is_basic_python_type(PyTypeObject *tp)
}
/*
- * Stripped down version of PyObject_GetAttrString,
- * avoids lookups for None, tuple, and List objects,
- * and doesn't create a PyErr since this code ignores it.
- *
- * This can be much faster then PyObject_GetAttrString where
- * exceptions are not used by caller.
+ * Stripped down version of PyObject_GetAttrString(obj, name) that does not
+ * raise PyExc_AttributeError.
*
- * 'obj' is the object to search for attribute.
+ * This allows it to avoid creating then discarding exception objects when
+ * performing lookups on objects without any attributes.
*
- * 'name' is the attribute to search for.
- *
- * Returns attribute value on success, NULL on failure.
+ * Returns attribute value on success, NULL without an exception set if
+ * there is no such attribute, and NULL with an exception on failure.
*/
static NPY_INLINE PyObject *
-maybe_get_attr(PyObject *obj, char *name)
+maybe_get_attr(PyObject *obj, char const *name)
{
PyTypeObject *tp = Py_TYPE(obj);
PyObject *res = (PyObject *)NULL;
/* Attribute referenced by (char *)name */
if (tp->tp_getattr != NULL) {
- res = (*tp->tp_getattr)(obj, name);
- if (res == NULL) {
+ res = (*tp->tp_getattr)(obj, (char *)name);
+ if (res == NULL && PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Clear();
}
}
/* Attribute referenced by (PyObject *)name */
else if (tp->tp_getattro != NULL) {
-#if defined(NPY_PY3K)
PyObject *w = PyUnicode_InternFromString(name);
-#else
- PyObject *w = PyString_InternFromString(name);
-#endif
if (w == NULL) {
return (PyObject *)NULL;
}
res = (*tp->tp_getattro)(obj, w);
Py_DECREF(w);
- if (res == NULL) {
+ if (res == NULL && PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Clear();
}
}
@@ -95,7 +81,7 @@ maybe_get_attr(PyObject *obj, char *name)
* In future, could be made more like _Py_LookupSpecial
*/
static NPY_INLINE PyObject *
-PyArray_LookupSpecial(PyObject *obj, char *name)
+PyArray_LookupSpecial(PyObject *obj, char const *name)
{
PyTypeObject *tp = Py_TYPE(obj);
@@ -115,7 +101,7 @@ PyArray_LookupSpecial(PyObject *obj, char *name)
* Kept for backwards compatibility. In future, we should deprecate this.
*/
static NPY_INLINE PyObject *
-PyArray_LookupSpecial_OnInstance(PyObject *obj, char *name)
+PyArray_LookupSpecial_OnInstance(PyObject *obj, char const *name)
{
PyTypeObject *tp = Py_TYPE(obj);
diff --git a/numpy/core/src/common/lowlevel_strided_loops.h b/numpy/core/src/common/lowlevel_strided_loops.h
index bacd27473..9208d5499 100644
--- a/numpy/core/src/common/lowlevel_strided_loops.h
+++ b/numpy/core/src/common/lowlevel_strided_loops.h
@@ -306,30 +306,30 @@ PyArray_CastRawArrays(npy_intp count,
NPY_NO_EXPORT npy_intp
PyArray_TransferNDimToStrided(npy_intp ndim,
char *dst, npy_intp dst_stride,
- char *src, npy_intp *src_strides, npy_intp src_strides_inc,
- npy_intp *coords, npy_intp coords_inc,
- npy_intp *shape, npy_intp shape_inc,
+ char *src, npy_intp const *src_strides, npy_intp src_strides_inc,
+ npy_intp const *coords, npy_intp coords_inc,
+ npy_intp const *shape, npy_intp shape_inc,
npy_intp count, npy_intp src_itemsize,
PyArray_StridedUnaryOp *stransfer,
NpyAuxData *transferdata);
NPY_NO_EXPORT npy_intp
PyArray_TransferStridedToNDim(npy_intp ndim,
- char *dst, npy_intp *dst_strides, npy_intp dst_strides_inc,
+ char *dst, npy_intp const *dst_strides, npy_intp dst_strides_inc,
char *src, npy_intp src_stride,
- npy_intp *coords, npy_intp coords_inc,
- npy_intp *shape, npy_intp shape_inc,
+ npy_intp const *coords, npy_intp coords_inc,
+ npy_intp const *shape, npy_intp shape_inc,
npy_intp count, npy_intp src_itemsize,
PyArray_StridedUnaryOp *stransfer,
NpyAuxData *transferdata);
NPY_NO_EXPORT npy_intp
PyArray_TransferMaskedStridedToNDim(npy_intp ndim,
- char *dst, npy_intp *dst_strides, npy_intp dst_strides_inc,
+ char *dst, npy_intp const *dst_strides, npy_intp dst_strides_inc,
char *src, npy_intp src_stride,
npy_bool *mask, npy_intp mask_stride,
- npy_intp *coords, npy_intp coords_inc,
- npy_intp *shape, npy_intp shape_inc,
+ npy_intp const *coords, npy_intp coords_inc,
+ npy_intp const *shape, npy_intp shape_inc,
npy_intp count, npy_intp src_itemsize,
PyArray_MaskedStridedUnaryOp *stransfer,
NpyAuxData *data);
@@ -365,8 +365,8 @@ mapiter_set(PyArrayMapIterObject *mit);
* Returns 0 on success, -1 on failure.
*/
NPY_NO_EXPORT int
-PyArray_PrepareOneRawArrayIter(int ndim, npy_intp *shape,
- char *data, npy_intp *strides,
+PyArray_PrepareOneRawArrayIter(int ndim, npy_intp const *shape,
+ char *data, npy_intp const *strides,
int *out_ndim, npy_intp *out_shape,
char **out_data, npy_intp *out_strides);
@@ -387,9 +387,9 @@ PyArray_PrepareOneRawArrayIter(int ndim, npy_intp *shape,
* Returns 0 on success, -1 on failure.
*/
NPY_NO_EXPORT int
-PyArray_PrepareTwoRawArrayIter(int ndim, npy_intp *shape,
- char *dataA, npy_intp *stridesA,
- char *dataB, npy_intp *stridesB,
+PyArray_PrepareTwoRawArrayIter(int ndim, npy_intp const *shape,
+ char *dataA, npy_intp const *stridesA,
+ char *dataB, npy_intp const *stridesB,
int *out_ndim, npy_intp *out_shape,
char **out_dataA, npy_intp *out_stridesA,
char **out_dataB, npy_intp *out_stridesB);
@@ -411,10 +411,10 @@ PyArray_PrepareTwoRawArrayIter(int ndim, npy_intp *shape,
* Returns 0 on success, -1 on failure.
*/
NPY_NO_EXPORT int
-PyArray_PrepareThreeRawArrayIter(int ndim, npy_intp *shape,
- char *dataA, npy_intp *stridesA,
- char *dataB, npy_intp *stridesB,
- char *dataC, npy_intp *stridesC,
+PyArray_PrepareThreeRawArrayIter(int ndim, npy_intp const *shape,
+ char *dataA, npy_intp const *stridesA,
+ char *dataB, npy_intp const *stridesB,
+ char *dataC, npy_intp const *stridesC,
int *out_ndim, npy_intp *out_shape,
char **out_dataA, npy_intp *out_stridesA,
char **out_dataB, npy_intp *out_stridesB,
diff --git a/numpy/core/src/common/npy_cblas.h b/numpy/core/src/common/npy_cblas.h
index a083f3bcc..97308238a 100644
--- a/numpy/core/src/common/npy_cblas.h
+++ b/numpy/core/src/common/npy_cblas.h
@@ -17,565 +17,47 @@ extern "C"
/*
* Enumerated and derived types
*/
-#define CBLAS_INDEX size_t /* this may vary between platforms */
-
enum CBLAS_ORDER {CblasRowMajor=101, CblasColMajor=102};
enum CBLAS_TRANSPOSE {CblasNoTrans=111, CblasTrans=112, CblasConjTrans=113};
enum CBLAS_UPLO {CblasUpper=121, CblasLower=122};
enum CBLAS_DIAG {CblasNonUnit=131, CblasUnit=132};
enum CBLAS_SIDE {CblasLeft=141, CblasRight=142};
-/*
- * ===========================================================================
- * Prototypes for level 1 BLAS functions (complex are recast as routines)
- * ===========================================================================
- */
-float cblas_sdsdot(const int N, const float alpha, const float *X,
- const int incX, const float *Y, const int incY);
-double cblas_dsdot(const int N, const float *X, const int incX, const float *Y,
- const int incY);
-float cblas_sdot(const int N, const float *X, const int incX,
- const float *Y, const int incY);
-double cblas_ddot(const int N, const double *X, const int incX,
- const double *Y, const int incY);
-
-/*
- * Functions having prefixes Z and C only
- */
-void cblas_cdotu_sub(const int N, const void *X, const int incX,
- const void *Y, const int incY, void *dotu);
-void cblas_cdotc_sub(const int N, const void *X, const int incX,
- const void *Y, const int incY, void *dotc);
-
-void cblas_zdotu_sub(const int N, const void *X, const int incX,
- const void *Y, const int incY, void *dotu);
-void cblas_zdotc_sub(const int N, const void *X, const int incX,
- const void *Y, const int incY, void *dotc);
-
-
-/*
- * Functions having prefixes S D SC DZ
- */
-float cblas_snrm2(const int N, const float *X, const int incX);
-float cblas_sasum(const int N, const float *X, const int incX);
-
-double cblas_dnrm2(const int N, const double *X, const int incX);
-double cblas_dasum(const int N, const double *X, const int incX);
-
-float cblas_scnrm2(const int N, const void *X, const int incX);
-float cblas_scasum(const int N, const void *X, const int incX);
-
-double cblas_dznrm2(const int N, const void *X, const int incX);
-double cblas_dzasum(const int N, const void *X, const int incX);
-
-
-/*
- * Functions having standard 4 prefixes (S D C Z)
- */
-CBLAS_INDEX cblas_isamax(const int N, const float *X, const int incX);
-CBLAS_INDEX cblas_idamax(const int N, const double *X, const int incX);
-CBLAS_INDEX cblas_icamax(const int N, const void *X, const int incX);
-CBLAS_INDEX cblas_izamax(const int N, const void *X, const int incX);
-
-/*
- * ===========================================================================
- * Prototypes for level 1 BLAS routines
- * ===========================================================================
- */
-
-/*
- * Routines with standard 4 prefixes (s, d, c, z)
- */
-void cblas_sswap(const int N, float *X, const int incX,
- float *Y, const int incY);
-void cblas_scopy(const int N, const float *X, const int incX,
- float *Y, const int incY);
-void cblas_saxpy(const int N, const float alpha, const float *X,
- const int incX, float *Y, const int incY);
-
-void cblas_dswap(const int N, double *X, const int incX,
- double *Y, const int incY);
-void cblas_dcopy(const int N, const double *X, const int incX,
- double *Y, const int incY);
-void cblas_daxpy(const int N, const double alpha, const double *X,
- const int incX, double *Y, const int incY);
-
-void cblas_cswap(const int N, void *X, const int incX,
- void *Y, const int incY);
-void cblas_ccopy(const int N, const void *X, const int incX,
- void *Y, const int incY);
-void cblas_caxpy(const int N, const void *alpha, const void *X,
- const int incX, void *Y, const int incY);
-
-void cblas_zswap(const int N, void *X, const int incX,
- void *Y, const int incY);
-void cblas_zcopy(const int N, const void *X, const int incX,
- void *Y, const int incY);
-void cblas_zaxpy(const int N, const void *alpha, const void *X,
- const int incX, void *Y, const int incY);
-
-
-/*
- * Routines with S and D prefix only
- */
-void cblas_srotg(float *a, float *b, float *c, float *s);
-void cblas_srotmg(float *d1, float *d2, float *b1, const float b2, float *P);
-void cblas_srot(const int N, float *X, const int incX,
- float *Y, const int incY, const float c, const float s);
-void cblas_srotm(const int N, float *X, const int incX,
- float *Y, const int incY, const float *P);
-
-void cblas_drotg(double *a, double *b, double *c, double *s);
-void cblas_drotmg(double *d1, double *d2, double *b1, const double b2, double *P);
-void cblas_drot(const int N, double *X, const int incX,
- double *Y, const int incY, const double c, const double s);
-void cblas_drotm(const int N, double *X, const int incX,
- double *Y, const int incY, const double *P);
-
-
-/*
- * Routines with S D C Z CS and ZD prefixes
- */
-void cblas_sscal(const int N, const float alpha, float *X, const int incX);
-void cblas_dscal(const int N, const double alpha, double *X, const int incX);
-void cblas_cscal(const int N, const void *alpha, void *X, const int incX);
-void cblas_zscal(const int N, const void *alpha, void *X, const int incX);
-void cblas_csscal(const int N, const float alpha, void *X, const int incX);
-void cblas_zdscal(const int N, const double alpha, void *X, const int incX);
-
-/*
- * ===========================================================================
- * Prototypes for level 2 BLAS
- * ===========================================================================
- */
-
-/*
- * Routines with standard 4 prefixes (S, D, C, Z)
- */
-void cblas_sgemv(const enum CBLAS_ORDER order,
- const enum CBLAS_TRANSPOSE TransA, const int M, const int N,
- const float alpha, const float *A, const int lda,
- const float *X, const int incX, const float beta,
- float *Y, const int incY);
-void cblas_sgbmv(const enum CBLAS_ORDER order,
- const enum CBLAS_TRANSPOSE TransA, const int M, const int N,
- const int KL, const int KU, const float alpha,
- const float *A, const int lda, const float *X,
- const int incX, const float beta, float *Y, const int incY);
-void cblas_strmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const float *A, const int lda,
- float *X, const int incX);
-void cblas_stbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const int K, const float *A, const int lda,
- float *X, const int incX);
-void cblas_stpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const float *Ap, float *X, const int incX);
-void cblas_strsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const float *A, const int lda, float *X,
- const int incX);
-void cblas_stbsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const int K, const float *A, const int lda,
- float *X, const int incX);
-void cblas_stpsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const float *Ap, float *X, const int incX);
-
-void cblas_dgemv(const enum CBLAS_ORDER order,
- const enum CBLAS_TRANSPOSE TransA, const int M, const int N,
- const double alpha, const double *A, const int lda,
- const double *X, const int incX, const double beta,
- double *Y, const int incY);
-void cblas_dgbmv(const enum CBLAS_ORDER order,
- const enum CBLAS_TRANSPOSE TransA, const int M, const int N,
- const int KL, const int KU, const double alpha,
- const double *A, const int lda, const double *X,
- const int incX, const double beta, double *Y, const int incY);
-void cblas_dtrmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const double *A, const int lda,
- double *X, const int incX);
-void cblas_dtbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const int K, const double *A, const int lda,
- double *X, const int incX);
-void cblas_dtpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const double *Ap, double *X, const int incX);
-void cblas_dtrsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const double *A, const int lda, double *X,
- const int incX);
-void cblas_dtbsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const int K, const double *A, const int lda,
- double *X, const int incX);
-void cblas_dtpsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const double *Ap, double *X, const int incX);
-
-void cblas_cgemv(const enum CBLAS_ORDER order,
- const enum CBLAS_TRANSPOSE TransA, const int M, const int N,
- const void *alpha, const void *A, const int lda,
- const void *X, const int incX, const void *beta,
- void *Y, const int incY);
-void cblas_cgbmv(const enum CBLAS_ORDER order,
- const enum CBLAS_TRANSPOSE TransA, const int M, const int N,
- const int KL, const int KU, const void *alpha,
- const void *A, const int lda, const void *X,
- const int incX, const void *beta, void *Y, const int incY);
-void cblas_ctrmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const void *A, const int lda,
- void *X, const int incX);
-void cblas_ctbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const int K, const void *A, const int lda,
- void *X, const int incX);
-void cblas_ctpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const void *Ap, void *X, const int incX);
-void cblas_ctrsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const void *A, const int lda, void *X,
- const int incX);
-void cblas_ctbsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const int K, const void *A, const int lda,
- void *X, const int incX);
-void cblas_ctpsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const void *Ap, void *X, const int incX);
-
-void cblas_zgemv(const enum CBLAS_ORDER order,
- const enum CBLAS_TRANSPOSE TransA, const int M, const int N,
- const void *alpha, const void *A, const int lda,
- const void *X, const int incX, const void *beta,
- void *Y, const int incY);
-void cblas_zgbmv(const enum CBLAS_ORDER order,
- const enum CBLAS_TRANSPOSE TransA, const int M, const int N,
- const int KL, const int KU, const void *alpha,
- const void *A, const int lda, const void *X,
- const int incX, const void *beta, void *Y, const int incY);
-void cblas_ztrmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const void *A, const int lda,
- void *X, const int incX);
-void cblas_ztbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const int K, const void *A, const int lda,
- void *X, const int incX);
-void cblas_ztpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const void *Ap, void *X, const int incX);
-void cblas_ztrsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const void *A, const int lda, void *X,
- const int incX);
-void cblas_ztbsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const int K, const void *A, const int lda,
- void *X, const int incX);
-void cblas_ztpsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const void *Ap, void *X, const int incX);
-
-
-/*
- * Routines with S and D prefixes only
- */
-void cblas_ssymv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const float alpha, const float *A,
- const int lda, const float *X, const int incX,
- const float beta, float *Y, const int incY);
-void cblas_ssbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const int K, const float alpha, const float *A,
- const int lda, const float *X, const int incX,
- const float beta, float *Y, const int incY);
-void cblas_sspmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const float alpha, const float *Ap,
- const float *X, const int incX,
- const float beta, float *Y, const int incY);
-void cblas_sger(const enum CBLAS_ORDER order, const int M, const int N,
- const float alpha, const float *X, const int incX,
- const float *Y, const int incY, float *A, const int lda);
-void cblas_ssyr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const float alpha, const float *X,
- const int incX, float *A, const int lda);
-void cblas_sspr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const float alpha, const float *X,
- const int incX, float *Ap);
-void cblas_ssyr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const float alpha, const float *X,
- const int incX, const float *Y, const int incY, float *A,
- const int lda);
-void cblas_sspr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const float alpha, const float *X,
- const int incX, const float *Y, const int incY, float *A);
-
-void cblas_dsymv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const double alpha, const double *A,
- const int lda, const double *X, const int incX,
- const double beta, double *Y, const int incY);
-void cblas_dsbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const int K, const double alpha, const double *A,
- const int lda, const double *X, const int incX,
- const double beta, double *Y, const int incY);
-void cblas_dspmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const double alpha, const double *Ap,
- const double *X, const int incX,
- const double beta, double *Y, const int incY);
-void cblas_dger(const enum CBLAS_ORDER order, const int M, const int N,
- const double alpha, const double *X, const int incX,
- const double *Y, const int incY, double *A, const int lda);
-void cblas_dsyr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const double alpha, const double *X,
- const int incX, double *A, const int lda);
-void cblas_dspr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const double alpha, const double *X,
- const int incX, double *Ap);
-void cblas_dsyr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const double alpha, const double *X,
- const int incX, const double *Y, const int incY, double *A,
- const int lda);
-void cblas_dspr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const double alpha, const double *X,
- const int incX, const double *Y, const int incY, double *A);
-
-
-/*
- * Routines with C and Z prefixes only
- */
-void cblas_chemv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const void *alpha, const void *A,
- const int lda, const void *X, const int incX,
- const void *beta, void *Y, const int incY);
-void cblas_chbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const int K, const void *alpha, const void *A,
- const int lda, const void *X, const int incX,
- const void *beta, void *Y, const int incY);
-void cblas_chpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const void *alpha, const void *Ap,
- const void *X, const int incX,
- const void *beta, void *Y, const int incY);
-void cblas_cgeru(const enum CBLAS_ORDER order, const int M, const int N,
- const void *alpha, const void *X, const int incX,
- const void *Y, const int incY, void *A, const int lda);
-void cblas_cgerc(const enum CBLAS_ORDER order, const int M, const int N,
- const void *alpha, const void *X, const int incX,
- const void *Y, const int incY, void *A, const int lda);
-void cblas_cher(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const float alpha, const void *X, const int incX,
- void *A, const int lda);
-void cblas_chpr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const float alpha, const void *X,
- const int incX, void *A);
-void cblas_cher2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const int N,
- const void *alpha, const void *X, const int incX,
- const void *Y, const int incY, void *A, const int lda);
-void cblas_chpr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const int N,
- const void *alpha, const void *X, const int incX,
- const void *Y, const int incY, void *Ap);
-
-void cblas_zhemv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const void *alpha, const void *A,
- const int lda, const void *X, const int incX,
- const void *beta, void *Y, const int incY);
-void cblas_zhbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const int K, const void *alpha, const void *A,
- const int lda, const void *X, const int incX,
- const void *beta, void *Y, const int incY);
-void cblas_zhpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const void *alpha, const void *Ap,
- const void *X, const int incX,
- const void *beta, void *Y, const int incY);
-void cblas_zgeru(const enum CBLAS_ORDER order, const int M, const int N,
- const void *alpha, const void *X, const int incX,
- const void *Y, const int incY, void *A, const int lda);
-void cblas_zgerc(const enum CBLAS_ORDER order, const int M, const int N,
- const void *alpha, const void *X, const int incX,
- const void *Y, const int incY, void *A, const int lda);
-void cblas_zher(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const double alpha, const void *X, const int incX,
- void *A, const int lda);
-void cblas_zhpr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const double alpha, const void *X,
- const int incX, void *A);
-void cblas_zher2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const int N,
- const void *alpha, const void *X, const int incX,
- const void *Y, const int incY, void *A, const int lda);
-void cblas_zhpr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const int N,
- const void *alpha, const void *X, const int incX,
- const void *Y, const int incY, void *Ap);
+#define CBLAS_INDEX size_t /* this may vary between platforms */
-/*
- * ===========================================================================
- * Prototypes for level 3 BLAS
- * ===========================================================================
- */
+#ifdef NO_APPEND_FORTRAN
+#define BLAS_FORTRAN_SUFFIX
+#else
+#define BLAS_FORTRAN_SUFFIX _
+#endif
-/*
- * Routines with standard 4 prefixes (S, D, C, Z)
- */
-void cblas_sgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,
- const enum CBLAS_TRANSPOSE TransB, const int M, const int N,
- const int K, const float alpha, const float *A,
- const int lda, const float *B, const int ldb,
- const float beta, float *C, const int ldc);
-void cblas_ssymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
- const enum CBLAS_UPLO Uplo, const int M, const int N,
- const float alpha, const float *A, const int lda,
- const float *B, const int ldb, const float beta,
- float *C, const int ldc);
-void cblas_ssyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
- const float alpha, const float *A, const int lda,
- const float beta, float *C, const int ldc);
-void cblas_ssyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
- const float alpha, const float *A, const int lda,
- const float *B, const int ldb, const float beta,
- float *C, const int ldc);
-void cblas_strmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
- const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
- const enum CBLAS_DIAG Diag, const int M, const int N,
- const float alpha, const float *A, const int lda,
- float *B, const int ldb);
-void cblas_strsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
- const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
- const enum CBLAS_DIAG Diag, const int M, const int N,
- const float alpha, const float *A, const int lda,
- float *B, const int ldb);
+#ifndef BLAS_SYMBOL_PREFIX
+#define BLAS_SYMBOL_PREFIX
+#endif
-void cblas_dgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,
- const enum CBLAS_TRANSPOSE TransB, const int M, const int N,
- const int K, const double alpha, const double *A,
- const int lda, const double *B, const int ldb,
- const double beta, double *C, const int ldc);
-void cblas_dsymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
- const enum CBLAS_UPLO Uplo, const int M, const int N,
- const double alpha, const double *A, const int lda,
- const double *B, const int ldb, const double beta,
- double *C, const int ldc);
-void cblas_dsyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
- const double alpha, const double *A, const int lda,
- const double beta, double *C, const int ldc);
-void cblas_dsyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
- const double alpha, const double *A, const int lda,
- const double *B, const int ldb, const double beta,
- double *C, const int ldc);
-void cblas_dtrmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
- const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
- const enum CBLAS_DIAG Diag, const int M, const int N,
- const double alpha, const double *A, const int lda,
- double *B, const int ldb);
-void cblas_dtrsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
- const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
- const enum CBLAS_DIAG Diag, const int M, const int N,
- const double alpha, const double *A, const int lda,
- double *B, const int ldb);
+#ifndef BLAS_SYMBOL_SUFFIX
+#define BLAS_SYMBOL_SUFFIX
+#endif
-void cblas_cgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,
- const enum CBLAS_TRANSPOSE TransB, const int M, const int N,
- const int K, const void *alpha, const void *A,
- const int lda, const void *B, const int ldb,
- const void *beta, void *C, const int ldc);
-void cblas_csymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
- const enum CBLAS_UPLO Uplo, const int M, const int N,
- const void *alpha, const void *A, const int lda,
- const void *B, const int ldb, const void *beta,
- void *C, const int ldc);
-void cblas_csyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
- const void *alpha, const void *A, const int lda,
- const void *beta, void *C, const int ldc);
-void cblas_csyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
- const void *alpha, const void *A, const int lda,
- const void *B, const int ldb, const void *beta,
- void *C, const int ldc);
-void cblas_ctrmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
- const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
- const enum CBLAS_DIAG Diag, const int M, const int N,
- const void *alpha, const void *A, const int lda,
- void *B, const int ldb);
-void cblas_ctrsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
- const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
- const enum CBLAS_DIAG Diag, const int M, const int N,
- const void *alpha, const void *A, const int lda,
- void *B, const int ldb);
+#define BLAS_FUNC_CONCAT(name,prefix,suffix,suffix2) prefix ## name ## suffix ## suffix2
+#define BLAS_FUNC_EXPAND(name,prefix,suffix,suffix2) BLAS_FUNC_CONCAT(name,prefix,suffix,suffix2)
-void cblas_zgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,
- const enum CBLAS_TRANSPOSE TransB, const int M, const int N,
- const int K, const void *alpha, const void *A,
- const int lda, const void *B, const int ldb,
- const void *beta, void *C, const int ldc);
-void cblas_zsymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
- const enum CBLAS_UPLO Uplo, const int M, const int N,
- const void *alpha, const void *A, const int lda,
- const void *B, const int ldb, const void *beta,
- void *C, const int ldc);
-void cblas_zsyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
- const void *alpha, const void *A, const int lda,
- const void *beta, void *C, const int ldc);
-void cblas_zsyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
- const void *alpha, const void *A, const int lda,
- const void *B, const int ldb, const void *beta,
- void *C, const int ldc);
-void cblas_ztrmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
- const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
- const enum CBLAS_DIAG Diag, const int M, const int N,
- const void *alpha, const void *A, const int lda,
- void *B, const int ldb);
-void cblas_ztrsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
- const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
- const enum CBLAS_DIAG Diag, const int M, const int N,
- const void *alpha, const void *A, const int lda,
- void *B, const int ldb);
+#define CBLAS_FUNC(name) BLAS_FUNC_EXPAND(name,BLAS_SYMBOL_PREFIX,,BLAS_SYMBOL_SUFFIX)
+#define BLAS_FUNC(name) BLAS_FUNC_EXPAND(name,BLAS_SYMBOL_PREFIX,BLAS_FORTRAN_SUFFIX,BLAS_SYMBOL_SUFFIX)
+#ifdef HAVE_BLAS_ILP64
+#define CBLAS_INT npy_int64
+#else
+#define CBLAS_INT int
+#endif
-/*
- * Routines with prefixes C and Z only
- */
-void cblas_chemm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
- const enum CBLAS_UPLO Uplo, const int M, const int N,
- const void *alpha, const void *A, const int lda,
- const void *B, const int ldb, const void *beta,
- void *C, const int ldc);
-void cblas_cherk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
- const float alpha, const void *A, const int lda,
- const float beta, void *C, const int ldc);
-void cblas_cher2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
- const void *alpha, const void *A, const int lda,
- const void *B, const int ldb, const float beta,
- void *C, const int ldc);
+#define BLASNAME(name) CBLAS_FUNC(name)
+#define BLASINT CBLAS_INT
-void cblas_zhemm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
- const enum CBLAS_UPLO Uplo, const int M, const int N,
- const void *alpha, const void *A, const int lda,
- const void *B, const int ldb, const void *beta,
- void *C, const int ldc);
-void cblas_zherk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
- const double alpha, const void *A, const int lda,
- const double beta, void *C, const int ldc);
-void cblas_zher2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
- const void *alpha, const void *A, const int lda,
- const void *B, const int ldb, const double beta,
- void *C, const int ldc);
+#include "npy_cblas_base.h"
-void cblas_xerbla(int p, const char *rout, const char *form, ...);
+#undef BLASINT
+#undef BLASNAME
#ifdef __cplusplus
}
diff --git a/numpy/core/src/common/npy_cblas_base.h b/numpy/core/src/common/npy_cblas_base.h
new file mode 100644
index 000000000..792b6f09e
--- /dev/null
+++ b/numpy/core/src/common/npy_cblas_base.h
@@ -0,0 +1,557 @@
+/*
+ * This header provides numpy with a consistent interface to CBLAS code. It is
+ * needed because not all CBLAS providers ship a cblas.h. For instance, MKL
+ * provides mkl_cblas.h and also typedefs the CBLAS_XXX enums.
+ */
+
+/*
+ * ===========================================================================
+ * Prototypes for level 1 BLAS functions (complex are recast as routines)
+ * ===========================================================================
+ */
+float BLASNAME(cblas_sdsdot)(const BLASINT N, const float alpha, const float *X,
+ const BLASINT incX, const float *Y, const BLASINT incY);
+double BLASNAME(cblas_dsdot)(const BLASINT N, const float *X, const BLASINT incX, const float *Y,
+ const BLASINT incY);
+float BLASNAME(cblas_sdot)(const BLASINT N, const float *X, const BLASINT incX,
+ const float *Y, const BLASINT incY);
+double BLASNAME(cblas_ddot)(const BLASINT N, const double *X, const BLASINT incX,
+ const double *Y, const BLASINT incY);
+
+/*
+ * Functions having prefixes Z and C only
+ */
+void BLASNAME(cblas_cdotu_sub)(const BLASINT N, const void *X, const BLASINT incX,
+ const void *Y, const BLASINT incY, void *dotu);
+void BLASNAME(cblas_cdotc_sub)(const BLASINT N, const void *X, const BLASINT incX,
+ const void *Y, const BLASINT incY, void *dotc);
+
+void BLASNAME(cblas_zdotu_sub)(const BLASINT N, const void *X, const BLASINT incX,
+ const void *Y, const BLASINT incY, void *dotu);
+void BLASNAME(cblas_zdotc_sub)(const BLASINT N, const void *X, const BLASINT incX,
+ const void *Y, const BLASINT incY, void *dotc);
+
+
+/*
+ * Functions having prefixes S D SC DZ
+ */
+float BLASNAME(cblas_snrm2)(const BLASINT N, const float *X, const BLASINT incX);
+float BLASNAME(cblas_sasum)(const BLASINT N, const float *X, const BLASINT incX);
+
+double BLASNAME(cblas_dnrm2)(const BLASINT N, const double *X, const BLASINT incX);
+double BLASNAME(cblas_dasum)(const BLASINT N, const double *X, const BLASINT incX);
+
+float BLASNAME(cblas_scnrm2)(const BLASINT N, const void *X, const BLASINT incX);
+float BLASNAME(cblas_scasum)(const BLASINT N, const void *X, const BLASINT incX);
+
+double BLASNAME(cblas_dznrm2)(const BLASINT N, const void *X, const BLASINT incX);
+double BLASNAME(cblas_dzasum)(const BLASINT N, const void *X, const BLASINT incX);
+
+
+/*
+ * Functions having standard 4 prefixes (S D C Z)
+ */
+CBLAS_INDEX BLASNAME(cblas_isamax)(const BLASINT N, const float *X, const BLASINT incX);
+CBLAS_INDEX BLASNAME(cblas_idamax)(const BLASINT N, const double *X, const BLASINT incX);
+CBLAS_INDEX BLASNAME(cblas_icamax)(const BLASINT N, const void *X, const BLASINT incX);
+CBLAS_INDEX BLASNAME(cblas_izamax)(const BLASINT N, const void *X, const BLASINT incX);
+
+/*
+ * ===========================================================================
+ * Prototypes for level 1 BLAS routines
+ * ===========================================================================
+ */
+
+/*
+ * Routines with standard 4 prefixes (s, d, c, z)
+ */
+void BLASNAME(cblas_sswap)(const BLASINT N, float *X, const BLASINT incX,
+ float *Y, const BLASINT incY);
+void BLASNAME(cblas_scopy)(const BLASINT N, const float *X, const BLASINT incX,
+ float *Y, const BLASINT incY);
+void BLASNAME(cblas_saxpy)(const BLASINT N, const float alpha, const float *X,
+ const BLASINT incX, float *Y, const BLASINT incY);
+
+void BLASNAME(cblas_dswap)(const BLASINT N, double *X, const BLASINT incX,
+ double *Y, const BLASINT incY);
+void BLASNAME(cblas_dcopy)(const BLASINT N, const double *X, const BLASINT incX,
+ double *Y, const BLASINT incY);
+void BLASNAME(cblas_daxpy)(const BLASINT N, const double alpha, const double *X,
+ const BLASINT incX, double *Y, const BLASINT incY);
+
+void BLASNAME(cblas_cswap)(const BLASINT N, void *X, const BLASINT incX,
+ void *Y, const BLASINT incY);
+void BLASNAME(cblas_ccopy)(const BLASINT N, const void *X, const BLASINT incX,
+ void *Y, const BLASINT incY);
+void BLASNAME(cblas_caxpy)(const BLASINT N, const void *alpha, const void *X,
+ const BLASINT incX, void *Y, const BLASINT incY);
+
+void BLASNAME(cblas_zswap)(const BLASINT N, void *X, const BLASINT incX,
+ void *Y, const BLASINT incY);
+void BLASNAME(cblas_zcopy)(const BLASINT N, const void *X, const BLASINT incX,
+ void *Y, const BLASINT incY);
+void BLASNAME(cblas_zaxpy)(const BLASINT N, const void *alpha, const void *X,
+ const BLASINT incX, void *Y, const BLASINT incY);
+
+
+/*
+ * Routines with S and D prefix only
+ */
+void BLASNAME(cblas_srotg)(float *a, float *b, float *c, float *s);
+void BLASNAME(cblas_srotmg)(float *d1, float *d2, float *b1, const float b2, float *P);
+void BLASNAME(cblas_srot)(const BLASINT N, float *X, const BLASINT incX,
+ float *Y, const BLASINT incY, const float c, const float s);
+void BLASNAME(cblas_srotm)(const BLASINT N, float *X, const BLASINT incX,
+ float *Y, const BLASINT incY, const float *P);
+
+void BLASNAME(cblas_drotg)(double *a, double *b, double *c, double *s);
+void BLASNAME(cblas_drotmg)(double *d1, double *d2, double *b1, const double b2, double *P);
+void BLASNAME(cblas_drot)(const BLASINT N, double *X, const BLASINT incX,
+ double *Y, const BLASINT incY, const double c, const double s);
+void BLASNAME(cblas_drotm)(const BLASINT N, double *X, const BLASINT incX,
+ double *Y, const BLASINT incY, const double *P);
+
+
+/*
+ * Routines with S D C Z CS and ZD prefixes
+ */
+void BLASNAME(cblas_sscal)(const BLASINT N, const float alpha, float *X, const BLASINT incX);
+void BLASNAME(cblas_dscal)(const BLASINT N, const double alpha, double *X, const BLASINT incX);
+void BLASNAME(cblas_cscal)(const BLASINT N, const void *alpha, void *X, const BLASINT incX);
+void BLASNAME(cblas_zscal)(const BLASINT N, const void *alpha, void *X, const BLASINT incX);
+void BLASNAME(cblas_csscal)(const BLASINT N, const float alpha, void *X, const BLASINT incX);
+void BLASNAME(cblas_zdscal)(const BLASINT N, const double alpha, void *X, const BLASINT incX);
+
+/*
+ * ===========================================================================
+ * Prototypes for level 2 BLAS
+ * ===========================================================================
+ */
+
+/*
+ * Routines with standard 4 prefixes (S, D, C, Z)
+ */
+void BLASNAME(cblas_sgemv)(const enum CBLAS_ORDER order,
+ const enum CBLAS_TRANSPOSE TransA, const BLASINT M, const BLASINT N,
+ const float alpha, const float *A, const BLASINT lda,
+ const float *X, const BLASINT incX, const float beta,
+ float *Y, const BLASINT incY);
+void BLASNAME(cblas_sgbmv)(const enum CBLAS_ORDER order,
+ const enum CBLAS_TRANSPOSE TransA, const BLASINT M, const BLASINT N,
+ const BLASINT KL, const BLASINT KU, const float alpha,
+ const float *A, const BLASINT lda, const float *X,
+ const BLASINT incX, const float beta, float *Y, const BLASINT incY);
+void BLASNAME(cblas_strmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const float *A, const BLASINT lda,
+ float *X, const BLASINT incX);
+void BLASNAME(cblas_stbmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const BLASINT K, const float *A, const BLASINT lda,
+ float *X, const BLASINT incX);
+void BLASNAME(cblas_stpmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const float *Ap, float *X, const BLASINT incX);
+void BLASNAME(cblas_strsv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const float *A, const BLASINT lda, float *X,
+ const BLASINT incX);
+void BLASNAME(cblas_stbsv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const BLASINT K, const float *A, const BLASINT lda,
+ float *X, const BLASINT incX);
+void BLASNAME(cblas_stpsv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const float *Ap, float *X, const BLASINT incX);
+
+void BLASNAME(cblas_dgemv)(const enum CBLAS_ORDER order,
+ const enum CBLAS_TRANSPOSE TransA, const BLASINT M, const BLASINT N,
+ const double alpha, const double *A, const BLASINT lda,
+ const double *X, const BLASINT incX, const double beta,
+ double *Y, const BLASINT incY);
+void BLASNAME(cblas_dgbmv)(const enum CBLAS_ORDER order,
+ const enum CBLAS_TRANSPOSE TransA, const BLASINT M, const BLASINT N,
+ const BLASINT KL, const BLASINT KU, const double alpha,
+ const double *A, const BLASINT lda, const double *X,
+ const BLASINT incX, const double beta, double *Y, const BLASINT incY);
+void BLASNAME(cblas_dtrmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const double *A, const BLASINT lda,
+ double *X, const BLASINT incX);
+void BLASNAME(cblas_dtbmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const BLASINT K, const double *A, const BLASINT lda,
+ double *X, const BLASINT incX);
+void BLASNAME(cblas_dtpmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const double *Ap, double *X, const BLASINT incX);
+void BLASNAME(cblas_dtrsv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const double *A, const BLASINT lda, double *X,
+ const BLASINT incX);
+void BLASNAME(cblas_dtbsv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const BLASINT K, const double *A, const BLASINT lda,
+ double *X, const BLASINT incX);
+void BLASNAME(cblas_dtpsv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const double *Ap, double *X, const BLASINT incX);
+
+void BLASNAME(cblas_cgemv)(const enum CBLAS_ORDER order,
+ const enum CBLAS_TRANSPOSE TransA, const BLASINT M, const BLASINT N,
+ const void *alpha, const void *A, const BLASINT lda,
+ const void *X, const BLASINT incX, const void *beta,
+ void *Y, const BLASINT incY);
+void BLASNAME(cblas_cgbmv)(const enum CBLAS_ORDER order,
+ const enum CBLAS_TRANSPOSE TransA, const BLASINT M, const BLASINT N,
+ const BLASINT KL, const BLASINT KU, const void *alpha,
+ const void *A, const BLASINT lda, const void *X,
+ const BLASINT incX, const void *beta, void *Y, const BLASINT incY);
+void BLASNAME(cblas_ctrmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const void *A, const BLASINT lda,
+ void *X, const BLASINT incX);
+void BLASNAME(cblas_ctbmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const BLASINT K, const void *A, const BLASINT lda,
+ void *X, const BLASINT incX);
+void BLASNAME(cblas_ctpmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const void *Ap, void *X, const BLASINT incX);
+void BLASNAME(cblas_ctrsv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const void *A, const BLASINT lda, void *X,
+ const BLASINT incX);
+void BLASNAME(cblas_ctbsv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const BLASINT K, const void *A, const BLASINT lda,
+ void *X, const BLASINT incX);
+void BLASNAME(cblas_ctpsv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const void *Ap, void *X, const BLASINT incX);
+
+void BLASNAME(cblas_zgemv)(const enum CBLAS_ORDER order,
+ const enum CBLAS_TRANSPOSE TransA, const BLASINT M, const BLASINT N,
+ const void *alpha, const void *A, const BLASINT lda,
+ const void *X, const BLASINT incX, const void *beta,
+ void *Y, const BLASINT incY);
+void BLASNAME(cblas_zgbmv)(const enum CBLAS_ORDER order,
+ const enum CBLAS_TRANSPOSE TransA, const BLASINT M, const BLASINT N,
+ const BLASINT KL, const BLASINT KU, const void *alpha,
+ const void *A, const BLASINT lda, const void *X,
+ const BLASINT incX, const void *beta, void *Y, const BLASINT incY);
+void BLASNAME(cblas_ztrmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const void *A, const BLASINT lda,
+ void *X, const BLASINT incX);
+void BLASNAME(cblas_ztbmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const BLASINT K, const void *A, const BLASINT lda,
+ void *X, const BLASINT incX);
+void BLASNAME(cblas_ztpmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const void *Ap, void *X, const BLASINT incX);
+void BLASNAME(cblas_ztrsv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const void *A, const BLASINT lda, void *X,
+ const BLASINT incX);
+void BLASNAME(cblas_ztbsv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const BLASINT K, const void *A, const BLASINT lda,
+ void *X, const BLASINT incX);
+void BLASNAME(cblas_ztpsv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const void *Ap, void *X, const BLASINT incX);
+
+
+/*
+ * Routines with S and D prefixes only
+ */
+void BLASNAME(cblas_ssymv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const float alpha, const float *A,
+ const BLASINT lda, const float *X, const BLASINT incX,
+ const float beta, float *Y, const BLASINT incY);
+void BLASNAME(cblas_ssbmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const BLASINT K, const float alpha, const float *A,
+ const BLASINT lda, const float *X, const BLASINT incX,
+ const float beta, float *Y, const BLASINT incY);
+void BLASNAME(cblas_sspmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const float alpha, const float *Ap,
+ const float *X, const BLASINT incX,
+ const float beta, float *Y, const BLASINT incY);
+void BLASNAME(cblas_sger)(const enum CBLAS_ORDER order, const BLASINT M, const BLASINT N,
+ const float alpha, const float *X, const BLASINT incX,
+ const float *Y, const BLASINT incY, float *A, const BLASINT lda);
+void BLASNAME(cblas_ssyr)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const float alpha, const float *X,
+ const BLASINT incX, float *A, const BLASINT lda);
+void BLASNAME(cblas_sspr)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const float alpha, const float *X,
+ const BLASINT incX, float *Ap);
+void BLASNAME(cblas_ssyr2)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const float alpha, const float *X,
+ const BLASINT incX, const float *Y, const BLASINT incY, float *A,
+ const BLASINT lda);
+void BLASNAME(cblas_sspr2)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const float alpha, const float *X,
+ const BLASINT incX, const float *Y, const BLASINT incY, float *A);
+
+void BLASNAME(cblas_dsymv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const double alpha, const double *A,
+ const BLASINT lda, const double *X, const BLASINT incX,
+ const double beta, double *Y, const BLASINT incY);
+void BLASNAME(cblas_dsbmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const BLASINT K, const double alpha, const double *A,
+ const BLASINT lda, const double *X, const BLASINT incX,
+ const double beta, double *Y, const BLASINT incY);
+void BLASNAME(cblas_dspmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const double alpha, const double *Ap,
+ const double *X, const BLASINT incX,
+ const double beta, double *Y, const BLASINT incY);
+void BLASNAME(cblas_dger)(const enum CBLAS_ORDER order, const BLASINT M, const BLASINT N,
+ const double alpha, const double *X, const BLASINT incX,
+ const double *Y, const BLASINT incY, double *A, const BLASINT lda);
+void BLASNAME(cblas_dsyr)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const double alpha, const double *X,
+ const BLASINT incX, double *A, const BLASINT lda);
+void BLASNAME(cblas_dspr)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const double alpha, const double *X,
+ const BLASINT incX, double *Ap);
+void BLASNAME(cblas_dsyr2)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const double alpha, const double *X,
+ const BLASINT incX, const double *Y, const BLASINT incY, double *A,
+ const BLASINT lda);
+void BLASNAME(cblas_dspr2)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const double alpha, const double *X,
+ const BLASINT incX, const double *Y, const BLASINT incY, double *A);
+
+
+/*
+ * Routines with C and Z prefixes only
+ */
+void BLASNAME(cblas_chemv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const void *alpha, const void *A,
+ const BLASINT lda, const void *X, const BLASINT incX,
+ const void *beta, void *Y, const BLASINT incY);
+void BLASNAME(cblas_chbmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const BLASINT K, const void *alpha, const void *A,
+ const BLASINT lda, const void *X, const BLASINT incX,
+ const void *beta, void *Y, const BLASINT incY);
+void BLASNAME(cblas_chpmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const void *alpha, const void *Ap,
+ const void *X, const BLASINT incX,
+ const void *beta, void *Y, const BLASINT incY);
+void BLASNAME(cblas_cgeru)(const enum CBLAS_ORDER order, const BLASINT M, const BLASINT N,
+ const void *alpha, const void *X, const BLASINT incX,
+ const void *Y, const BLASINT incY, void *A, const BLASINT lda);
+void BLASNAME(cblas_cgerc)(const enum CBLAS_ORDER order, const BLASINT M, const BLASINT N,
+ const void *alpha, const void *X, const BLASINT incX,
+ const void *Y, const BLASINT incY, void *A, const BLASINT lda);
+void BLASNAME(cblas_cher)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const float alpha, const void *X, const BLASINT incX,
+ void *A, const BLASINT lda);
+void BLASNAME(cblas_chpr)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const float alpha, const void *X,
+ const BLASINT incX, void *A);
+void BLASNAME(cblas_cher2)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const BLASINT N,
+ const void *alpha, const void *X, const BLASINT incX,
+ const void *Y, const BLASINT incY, void *A, const BLASINT lda);
+void BLASNAME(cblas_chpr2)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const BLASINT N,
+ const void *alpha, const void *X, const BLASINT incX,
+ const void *Y, const BLASINT incY, void *Ap);
+
+void BLASNAME(cblas_zhemv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const void *alpha, const void *A,
+ const BLASINT lda, const void *X, const BLASINT incX,
+ const void *beta, void *Y, const BLASINT incY);
+void BLASNAME(cblas_zhbmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const BLASINT K, const void *alpha, const void *A,
+ const BLASINT lda, const void *X, const BLASINT incX,
+ const void *beta, void *Y, const BLASINT incY);
+void BLASNAME(cblas_zhpmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const void *alpha, const void *Ap,
+ const void *X, const BLASINT incX,
+ const void *beta, void *Y, const BLASINT incY);
+void BLASNAME(cblas_zgeru)(const enum CBLAS_ORDER order, const BLASINT M, const BLASINT N,
+ const void *alpha, const void *X, const BLASINT incX,
+ const void *Y, const BLASINT incY, void *A, const BLASINT lda);
+void BLASNAME(cblas_zgerc)(const enum CBLAS_ORDER order, const BLASINT M, const BLASINT N,
+ const void *alpha, const void *X, const BLASINT incX,
+ const void *Y, const BLASINT incY, void *A, const BLASINT lda);
+void BLASNAME(cblas_zher)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const double alpha, const void *X, const BLASINT incX,
+ void *A, const BLASINT lda);
+void BLASNAME(cblas_zhpr)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const double alpha, const void *X,
+ const BLASINT incX, void *A);
+void BLASNAME(cblas_zher2)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const BLASINT N,
+ const void *alpha, const void *X, const BLASINT incX,
+ const void *Y, const BLASINT incY, void *A, const BLASINT lda);
+void BLASNAME(cblas_zhpr2)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const BLASINT N,
+ const void *alpha, const void *X, const BLASINT incX,
+ const void *Y, const BLASINT incY, void *Ap);
+
+/*
+ * ===========================================================================
+ * Prototypes for level 3 BLAS
+ * ===========================================================================
+ */
+
+/*
+ * Routines with standard 4 prefixes (S, D, C, Z)
+ */
+void BLASNAME(cblas_sgemm)(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_TRANSPOSE TransB, const BLASINT M, const BLASINT N,
+ const BLASINT K, const float alpha, const float *A,
+ const BLASINT lda, const float *B, const BLASINT ldb,
+ const float beta, float *C, const BLASINT ldc);
+void BLASNAME(cblas_ssymm)(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const BLASINT M, const BLASINT N,
+ const float alpha, const float *A, const BLASINT lda,
+ const float *B, const BLASINT ldb, const float beta,
+ float *C, const BLASINT ldc);
+void BLASNAME(cblas_ssyrk)(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const BLASINT N, const BLASINT K,
+ const float alpha, const float *A, const BLASINT lda,
+ const float beta, float *C, const BLASINT ldc);
+void BLASNAME(cblas_ssyr2k)(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const BLASINT N, const BLASINT K,
+ const float alpha, const float *A, const BLASINT lda,
+ const float *B, const BLASINT ldb, const float beta,
+ float *C, const BLASINT ldc);
+void BLASNAME(cblas_strmm)(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_DIAG Diag, const BLASINT M, const BLASINT N,
+ const float alpha, const float *A, const BLASINT lda,
+ float *B, const BLASINT ldb);
+void BLASNAME(cblas_strsm)(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_DIAG Diag, const BLASINT M, const BLASINT N,
+ const float alpha, const float *A, const BLASINT lda,
+ float *B, const BLASINT ldb);
+
+void BLASNAME(cblas_dgemm)(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_TRANSPOSE TransB, const BLASINT M, const BLASINT N,
+ const BLASINT K, const double alpha, const double *A,
+ const BLASINT lda, const double *B, const BLASINT ldb,
+ const double beta, double *C, const BLASINT ldc);
+void BLASNAME(cblas_dsymm)(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const BLASINT M, const BLASINT N,
+ const double alpha, const double *A, const BLASINT lda,
+ const double *B, const BLASINT ldb, const double beta,
+ double *C, const BLASINT ldc);
+void BLASNAME(cblas_dsyrk)(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const BLASINT N, const BLASINT K,
+ const double alpha, const double *A, const BLASINT lda,
+ const double beta, double *C, const BLASINT ldc);
+void BLASNAME(cblas_dsyr2k)(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const BLASINT N, const BLASINT K,
+ const double alpha, const double *A, const BLASINT lda,
+ const double *B, const BLASINT ldb, const double beta,
+ double *C, const BLASINT ldc);
+void BLASNAME(cblas_dtrmm)(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_DIAG Diag, const BLASINT M, const BLASINT N,
+ const double alpha, const double *A, const BLASINT lda,
+ double *B, const BLASINT ldb);
+void BLASNAME(cblas_dtrsm)(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_DIAG Diag, const BLASINT M, const BLASINT N,
+ const double alpha, const double *A, const BLASINT lda,
+ double *B, const BLASINT ldb);
+
+void BLASNAME(cblas_cgemm)(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_TRANSPOSE TransB, const BLASINT M, const BLASINT N,
+ const BLASINT K, const void *alpha, const void *A,
+ const BLASINT lda, const void *B, const BLASINT ldb,
+ const void *beta, void *C, const BLASINT ldc);
+void BLASNAME(cblas_csymm)(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const BLASINT M, const BLASINT N,
+ const void *alpha, const void *A, const BLASINT lda,
+ const void *B, const BLASINT ldb, const void *beta,
+ void *C, const BLASINT ldc);
+void BLASNAME(cblas_csyrk)(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const BLASINT N, const BLASINT K,
+ const void *alpha, const void *A, const BLASINT lda,
+ const void *beta, void *C, const BLASINT ldc);
+void BLASNAME(cblas_csyr2k)(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const BLASINT N, const BLASINT K,
+ const void *alpha, const void *A, const BLASINT lda,
+ const void *B, const BLASINT ldb, const void *beta,
+ void *C, const BLASINT ldc);
+void BLASNAME(cblas_ctrmm)(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_DIAG Diag, const BLASINT M, const BLASINT N,
+ const void *alpha, const void *A, const BLASINT lda,
+ void *B, const BLASINT ldb);
+void BLASNAME(cblas_ctrsm)(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_DIAG Diag, const BLASINT M, const BLASINT N,
+ const void *alpha, const void *A, const BLASINT lda,
+ void *B, const BLASINT ldb);
+
+void BLASNAME(cblas_zgemm)(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_TRANSPOSE TransB, const BLASINT M, const BLASINT N,
+ const BLASINT K, const void *alpha, const void *A,
+ const BLASINT lda, const void *B, const BLASINT ldb,
+ const void *beta, void *C, const BLASINT ldc);
+void BLASNAME(cblas_zsymm)(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const BLASINT M, const BLASINT N,
+ const void *alpha, const void *A, const BLASINT lda,
+ const void *B, const BLASINT ldb, const void *beta,
+ void *C, const BLASINT ldc);
+void BLASNAME(cblas_zsyrk)(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const BLASINT N, const BLASINT K,
+ const void *alpha, const void *A, const BLASINT lda,
+ const void *beta, void *C, const BLASINT ldc);
+void BLASNAME(cblas_zsyr2k)(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const BLASINT N, const BLASINT K,
+ const void *alpha, const void *A, const BLASINT lda,
+ const void *B, const BLASINT ldb, const void *beta,
+ void *C, const BLASINT ldc);
+void BLASNAME(cblas_ztrmm)(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_DIAG Diag, const BLASINT M, const BLASINT N,
+ const void *alpha, const void *A, const BLASINT lda,
+ void *B, const BLASINT ldb);
+void BLASNAME(cblas_ztrsm)(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_DIAG Diag, const BLASINT M, const BLASINT N,
+ const void *alpha, const void *A, const BLASINT lda,
+ void *B, const BLASINT ldb);
+
+
+/*
+ * Routines with prefixes C and Z only
+ */
+void BLASNAME(cblas_chemm)(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const BLASINT M, const BLASINT N,
+ const void *alpha, const void *A, const BLASINT lda,
+ const void *B, const BLASINT ldb, const void *beta,
+ void *C, const BLASINT ldc);
+void BLASNAME(cblas_cherk)(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const BLASINT N, const BLASINT K,
+ const float alpha, const void *A, const BLASINT lda,
+ const float beta, void *C, const BLASINT ldc);
+void BLASNAME(cblas_cher2k)(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const BLASINT N, const BLASINT K,
+ const void *alpha, const void *A, const BLASINT lda,
+ const void *B, const BLASINT ldb, const float beta,
+ void *C, const BLASINT ldc);
+
+void BLASNAME(cblas_zhemm)(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const BLASINT M, const BLASINT N,
+ const void *alpha, const void *A, const BLASINT lda,
+ const void *B, const BLASINT ldb, const void *beta,
+ void *C, const BLASINT ldc);
+void BLASNAME(cblas_zherk)(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const BLASINT N, const BLASINT K,
+ const double alpha, const void *A, const BLASINT lda,
+ const double beta, void *C, const BLASINT ldc);
+void BLASNAME(cblas_zher2k)(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const BLASINT N, const BLASINT K,
+ const void *alpha, const void *A, const BLASINT lda,
+ const void *B, const BLASINT ldb, const double beta,
+ void *C, const BLASINT ldc);
+
+void BLASNAME(cblas_xerbla)(BLASINT p, const char *rout, const char *form, ...);
diff --git a/numpy/core/src/common/npy_config.h b/numpy/core/src/common/npy_config.h
index eedfbe364..aebe241a5 100644
--- a/numpy/core/src/common/npy_config.h
+++ b/numpy/core/src/common/npy_config.h
@@ -2,6 +2,7 @@
#define _NPY_NPY_CONFIG_H_
#include "config.h"
+#include "npy_cpu_features.h"
#include "numpy/numpyconfig.h"
#include "numpy/npy_cpu.h"
#include "numpy/npy_os.h"
diff --git a/numpy/core/src/common/npy_cpu_features.c.src b/numpy/core/src/common/npy_cpu_features.c.src
new file mode 100644
index 000000000..4f193a471
--- /dev/null
+++ b/numpy/core/src/common/npy_cpu_features.c.src
@@ -0,0 +1,410 @@
+#include "npy_cpu_features.h"
+#include "numpy/npy_common.h" // for NPY_INLINE
+#include "numpy/npy_cpu.h" // To guarantee of having CPU definitions in scope.
+
+/******************** Private Definitions *********************/
+
+// Hold all CPU features boolean values
+static unsigned char npy__cpu_have[NPY_CPU_FEATURE_MAX];
+
+/******************** Private Declarations *********************/
+
+// Detect almost all CPU features at runtime
+static void
+npy__cpu_init_features(void);
+
+/******************** Public Definitions *********************/
+
+NPY_VISIBILITY_HIDDEN int
+npy_cpu_have(int feature_id)
+{
+ if (feature_id <= NPY_CPU_FEATURE_NONE || feature_id >= NPY_CPU_FEATURE_MAX)
+ return 0;
+ return npy__cpu_have[feature_id];
+}
+
+NPY_VISIBILITY_HIDDEN int
+npy_cpu_init(void)
+{
+ npy__cpu_init_features();
+ return 0;
+}
+
+NPY_VISIBILITY_HIDDEN PyObject *
+npy_cpu_features_dict(void)
+{
+ PyObject *dict = PyDict_New();
+ if (dict) {
+ /**begin repeat
+ * #feature = MMX, SSE, SSE2, SSE3, SSSE3, SSE41, POPCNT, SSE42,
+ * AVX, F16C, XOP, FMA4, FMA3, AVX2, AVX512F,
+ * AVX512CD, AVX512ER, AVX512PF, AVX5124FMAPS, AVX5124VNNIW,
+ * AVX512VPOPCNTDQ, AVX512VL, AVX512BW, AVX512DQ, AVX512VNNI,
+ * AVX512IFMA, AVX512VBMI, AVX512VBMI2, AVX512BITALG,
+ * AVX512_KNL, AVX512_KNM, AVX512_SKX, AVX512_CLX, AVX512_CNL, AVX512_ICL,
+ * VSX, VSX2, VSX3,
+ * NEON, NEON_FP16, NEON_VFPV4, ASIMD, FPHP, ASIMDHP, ASIMDDP, ASIMDFHM#
+ */
+ if (PyDict_SetItemString(dict, "@feature@",
+ npy__cpu_have[NPY_CPU_FEATURE_@feature@] ? Py_True : Py_False) < 0) {
+ Py_DECREF(dict);
+ return NULL;
+ }
+ /**end repeat**/
+ }
+ return dict;
+}
+
+/****************************************************************
+ * This section is reserved to defining @npy__cpu_init_features
+ * for each CPU architecture; please try to keep it clean. Thank you.
+ ****************************************************************/
+
+/***************** X86 ******************/
+
+#if defined(NPY_CPU_AMD64) || defined(NPY_CPU_X86)
+
+#ifdef _MSC_VER
+ #include <intrin.h>
+#elif defined(__INTEL_COMPILER)
+ #include <immintrin.h>
+#endif
+
+static unsigned long long
+npy__cpu_getxcr0(void)
+{
+#if defined(_MSC_VER) || defined (__INTEL_COMPILER)
+    return _xgetbv(0);
+#elif defined(__GNUC__) || defined(__clang__)
+    unsigned int eax, edx;
+    // XGETBV with ECX=0 reads XCR0 (EDX:EAX = high:low 32 bits)
+    __asm__("xgetbv" : "=a" (eax), "=d" (edx) : "c" (0));
+    return (eax | (unsigned long long)edx << 32);
+#else
+    // TODO: handle other x86 compilers
+    return 0;
+#endif
+}
+
+static void
+npy__cpu_cpuid(int reg[4], int func_id)
+{
+#if defined(_MSC_VER)
+ __cpuidex(reg, func_id, 0);
+#elif defined(__INTEL_COMPILER)
+ __cpuid(reg, func_id);
+#elif defined(__GNUC__) || defined(__clang__)
+ #if defined(NPY_CPU_X86) && defined(__PIC__)
+ // %ebx may be the PIC register
+ __asm__("xchg{l}\t{%%}ebx, %1\n\t"
+ "cpuid\n\t"
+ "xchg{l}\t{%%}ebx, %1\n\t"
+ : "=a" (reg[0]), "=r" (reg[1]), "=c" (reg[2]),
+ "=d" (reg[3])
+ : "a" (func_id), "c" (0)
+ );
+ #else
+ __asm__("cpuid\n\t"
+ : "=a" (reg[0]), "=b" (reg[1]), "=c" (reg[2]),
+ "=d" (reg[3])
+ : "a" (func_id), "c" (0)
+ );
+ #endif
+#else
+ // TODO: handle other x86 compilers
+ reg[0] = 0;
+#endif
+}
+
+static void
+npy__cpu_init_features(void)
+{
+ memset(npy__cpu_have, 0, sizeof(npy__cpu_have[0]) * NPY_CPU_FEATURE_MAX);
+
+ // validate platform support
+ int reg[] = {0, 0, 0, 0};
+ npy__cpu_cpuid(reg, 0);
+ if (reg[0] == 0)
+ return;
+
+ npy__cpu_cpuid(reg, 1);
+ npy__cpu_have[NPY_CPU_FEATURE_MMX] = (reg[3] & (1 << 23)) != 0;
+ npy__cpu_have[NPY_CPU_FEATURE_SSE] = (reg[3] & (1 << 25)) != 0;
+ npy__cpu_have[NPY_CPU_FEATURE_SSE2] = (reg[3] & (1 << 26)) != 0;
+ npy__cpu_have[NPY_CPU_FEATURE_SSE3] = (reg[2] & (1 << 0)) != 0;
+ npy__cpu_have[NPY_CPU_FEATURE_SSSE3] = (reg[2] & (1 << 9)) != 0;
+ npy__cpu_have[NPY_CPU_FEATURE_SSE41] = (reg[2] & (1 << 19)) != 0;
+ npy__cpu_have[NPY_CPU_FEATURE_POPCNT] = (reg[2] & (1 << 23)) != 0;
+ npy__cpu_have[NPY_CPU_FEATURE_SSE42] = (reg[2] & (1 << 20)) != 0;
+ npy__cpu_have[NPY_CPU_FEATURE_F16C] = (reg[2] & (1 << 29)) != 0;
+
+ // check OSXSAVE
+ if ((reg[2] & (1 << 27)) == 0)
+ return;
+ // check AVX OS support
+ int xcr = npy__cpu_getxcr0();
+ if ((xcr & 6) != 6)
+ return;
+ npy__cpu_have[NPY_CPU_FEATURE_AVX] = (reg[2] & (1 << 28)) != 0;
+ if (!npy__cpu_have[NPY_CPU_FEATURE_AVX])
+ return;
+ npy__cpu_have[NPY_CPU_FEATURE_FMA3] = (reg[2] & (1 << 12)) != 0;
+
+ // second call to the cpuid to get extended AMD feature bits
+ npy__cpu_cpuid(reg, 0x80000001);
+ npy__cpu_have[NPY_CPU_FEATURE_XOP] = (reg[2] & (1 << 11)) != 0;
+ npy__cpu_have[NPY_CPU_FEATURE_FMA4] = (reg[2] & (1 << 16)) != 0;
+
+ // third call to the cpuid to get extended AVX2 & AVX512 feature bits
+ npy__cpu_cpuid(reg, 7);
+ npy__cpu_have[NPY_CPU_FEATURE_AVX2] = (reg[1] & (1 << 5)) != 0;
+ if (!npy__cpu_have[NPY_CPU_FEATURE_AVX2])
+ return;
+ // detect AVX2 & FMA3
+ npy__cpu_have[NPY_CPU_FEATURE_FMA] = npy__cpu_have[NPY_CPU_FEATURE_FMA3];
+
+ // check AVX512 OS support
+ if ((xcr & 0xe6) != 0xe6)
+ return;
+ npy__cpu_have[NPY_CPU_FEATURE_AVX512F] = (reg[1] & (1 << 16)) != 0;
+ npy__cpu_have[NPY_CPU_FEATURE_AVX512CD] = (reg[1] & (1 << 28)) != 0;
+ if (npy__cpu_have[NPY_CPU_FEATURE_AVX512F] && npy__cpu_have[NPY_CPU_FEATURE_AVX512CD]) {
+ // Knights Landing
+ npy__cpu_have[NPY_CPU_FEATURE_AVX512PF] = (reg[1] & (1 << 26)) != 0;
+ npy__cpu_have[NPY_CPU_FEATURE_AVX512ER] = (reg[1] & (1 << 27)) != 0;
+ npy__cpu_have[NPY_CPU_FEATURE_AVX512_KNL] = npy__cpu_have[NPY_CPU_FEATURE_AVX512ER] &&
+ npy__cpu_have[NPY_CPU_FEATURE_AVX512PF];
+ // Knights Mill
+ npy__cpu_have[NPY_CPU_FEATURE_AVX512VPOPCNTDQ] = (reg[2] & (1 << 14)) != 0;
+ npy__cpu_have[NPY_CPU_FEATURE_AVX5124VNNIW] = (reg[3] & (1 << 2)) != 0;
+ npy__cpu_have[NPY_CPU_FEATURE_AVX5124FMAPS] = (reg[3] & (1 << 3)) != 0;
+ npy__cpu_have[NPY_CPU_FEATURE_AVX512_KNM] = npy__cpu_have[NPY_CPU_FEATURE_AVX512_KNL] &&
+ npy__cpu_have[NPY_CPU_FEATURE_AVX5124FMAPS] &&
+ npy__cpu_have[NPY_CPU_FEATURE_AVX5124VNNIW] &&
+ npy__cpu_have[NPY_CPU_FEATURE_AVX512VPOPCNTDQ];
+
+ // Skylake-X
+ npy__cpu_have[NPY_CPU_FEATURE_AVX512DQ] = (reg[1] & (1 << 17)) != 0;
+ npy__cpu_have[NPY_CPU_FEATURE_AVX512BW] = (reg[1] & (1 << 30)) != 0;
+ npy__cpu_have[NPY_CPU_FEATURE_AVX512VL] = (reg[1] & (1 << 31)) != 0;
+ npy__cpu_have[NPY_CPU_FEATURE_AVX512_SKX] = npy__cpu_have[NPY_CPU_FEATURE_AVX512BW] &&
+ npy__cpu_have[NPY_CPU_FEATURE_AVX512DQ] &&
+ npy__cpu_have[NPY_CPU_FEATURE_AVX512VL];
+ // Cascade Lake
+ npy__cpu_have[NPY_CPU_FEATURE_AVX512VNNI] = (reg[2] & (1 << 11)) != 0;
+ npy__cpu_have[NPY_CPU_FEATURE_AVX512_CLX] = npy__cpu_have[NPY_CPU_FEATURE_AVX512_SKX] &&
+ npy__cpu_have[NPY_CPU_FEATURE_AVX512VNNI];
+
+ // Cannon Lake
+ npy__cpu_have[NPY_CPU_FEATURE_AVX512IFMA] = (reg[1] & (1 << 21)) != 0;
+ npy__cpu_have[NPY_CPU_FEATURE_AVX512VBMI] = (reg[2] & (1 << 1)) != 0;
+ npy__cpu_have[NPY_CPU_FEATURE_AVX512_CNL] = npy__cpu_have[NPY_CPU_FEATURE_AVX512_SKX] &&
+ npy__cpu_have[NPY_CPU_FEATURE_AVX512IFMA] &&
+ npy__cpu_have[NPY_CPU_FEATURE_AVX512VBMI];
+ // Ice Lake
+ npy__cpu_have[NPY_CPU_FEATURE_AVX512VBMI2] = (reg[2] & (1 << 6)) != 0;
+ npy__cpu_have[NPY_CPU_FEATURE_AVX512BITALG] = (reg[2] & (1 << 12)) != 0;
+ npy__cpu_have[NPY_CPU_FEATURE_AVX512_ICL] = npy__cpu_have[NPY_CPU_FEATURE_AVX512_CLX] &&
+ npy__cpu_have[NPY_CPU_FEATURE_AVX512_CNL] &&
+ npy__cpu_have[NPY_CPU_FEATURE_AVX512VBMI2] &&
+ npy__cpu_have[NPY_CPU_FEATURE_AVX512BITALG] &&
+ npy__cpu_have[NPY_CPU_FEATURE_AVX512VPOPCNTDQ];
+ }
+}
+
+/***************** POWER ******************/
+
+#elif defined(NPY_CPU_PPC64) || defined(NPY_CPU_PPC64LE)
+
+#ifdef __linux__
+ #include <sys/auxv.h>
+ #ifndef AT_HWCAP2
+ #define AT_HWCAP2 26
+ #endif
+ #ifndef PPC_FEATURE2_ARCH_3_00
+ #define PPC_FEATURE2_ARCH_3_00 0x00800000
+ #endif
+#endif
+
+static void
+npy__cpu_init_features(void)
+{
+ memset(npy__cpu_have, 0, sizeof(npy__cpu_have[0]) * NPY_CPU_FEATURE_MAX);
+#ifdef __linux__
+ unsigned int hwcap = getauxval(AT_HWCAP);
+ if ((hwcap & PPC_FEATURE_HAS_VSX) == 0)
+ return;
+
+ hwcap = getauxval(AT_HWCAP2);
+ if (hwcap & PPC_FEATURE2_ARCH_3_00)
+ {
+ npy__cpu_have[NPY_CPU_FEATURE_VSX] =
+ npy__cpu_have[NPY_CPU_FEATURE_VSX2] =
+ npy__cpu_have[NPY_CPU_FEATURE_VSX3] = 1;
+ return;
+ }
+ npy__cpu_have[NPY_CPU_FEATURE_VSX2] = (hwcap & PPC_FEATURE2_ARCH_2_07) != 0;
+ npy__cpu_have[NPY_CPU_FEATURE_VSX] = 1;
+// TODO: AIX, FreeBSD
+#else
+ npy__cpu_have[NPY_CPU_FEATURE_VSX] = 1;
+ #if defined(NPY_CPU_PPC64LE) || defined(NPY_HAVE_VSX2)
+ npy__cpu_have[NPY_CPU_FEATURE_VSX2] = 1;
+ #endif
+ #ifdef NPY_HAVE_VSX3
+ npy__cpu_have[NPY_CPU_FEATURE_VSX3] = 1;
+ #endif
+#endif
+}
+
+/***************** ARM ******************/
+
+#elif defined(__arm__) || defined(__aarch64__)
+
+static NPY_INLINE void
+npy__cpu_init_features_arm8(void)
+{
+ npy__cpu_have[NPY_CPU_FEATURE_NEON] =
+ npy__cpu_have[NPY_CPU_FEATURE_NEON_FP16] =
+ npy__cpu_have[NPY_CPU_FEATURE_NEON_VFPV4] =
+ npy__cpu_have[NPY_CPU_FEATURE_ASIMD] = 1;
+}
+
+#ifdef __linux__
+/*
+ * we aren't sure what kind of kernel or C library we are dealing with,
+ * so we play it safe
+*/
+#include <stdio.h>
+#include <fcntl.h>
+
+#define NPY__HWCAP 16
+#define NPY__HWCAP2 26
+
+// arch/arm/include/uapi/asm/hwcap.h
+#define NPY__HWCAP_HALF (1 << 1)
+#define NPY__HWCAP_NEON (1 << 12)
+#define NPY__HWCAP_VFPv3 (1 << 13)
+#define NPY__HWCAP_VFPv4 (1 << 16)
+#define NPY__HWCAP2_AES (1 << 0)
+#define NPY__HWCAP2_PMULL (1 << 1)
+#define NPY__HWCAP2_SHA1 (1 << 2)
+#define NPY__HWCAP2_SHA2 (1 << 3)
+#define NPY__HWCAP2_CRC32 (1 << 4)
+// arch/arm64/include/uapi/asm/hwcap.h
+#define NPY__HWCAP_FP (1 << 0)
+#define NPY__HWCAP_ASIMD (1 << 1)
+#define NPY__HWCAP_FPHP (1 << 9)
+#define NPY__HWCAP_ASIMDHP (1 << 10)
+#define NPY__HWCAP_ASIMDDP (1 << 20)
+#define NPY__HWCAP_ASIMDFHM (1 << 23)
+
+__attribute__((weak)) unsigned long getauxval(unsigned long); // linker should handle it
+static int
+npy__cpu_init_features_linux(void)
+{
+ unsigned long hwcap = 0, hwcap2 = 0;
+ if (getauxval != 0) {
+ hwcap = getauxval(NPY__HWCAP);
+ #ifdef __arm__
+ hwcap2 = getauxval(NPY__HWCAP2);
+ #endif
+ } else {
+ unsigned long auxv[2];
+ int fd = open("/proc/self/auxv", O_RDONLY);
+ if (fd >= 0) {
+ while (read(fd, &auxv, sizeof(auxv)) == sizeof(auxv)) {
+ if (auxv[0] == NPY__HWCAP) {
+ hwcap = auxv[1];
+ }
+ #ifdef __arm__
+ else if (auxv[0] == NPY__HWCAP2) {
+ hwcap2 = auxv[1];
+ }
+ #endif
+ // detect the end
+ else if (auxv[0] == 0 && auxv[1] == 0) {
+ break;
+ }
+ }
+ close(fd);
+ }
+ }
+ if (hwcap == 0 && hwcap2 == 0) {
+ /*
+ * FIXME: fall back to compiler definitions;
+ * BTW we can parse /proc/cpuinfo for badly patched kernels
+ */
+ return 0;
+ }
+#ifdef __arm__
+ // Detect Arm8 (aarch32 state)
+ if ((hwcap2 & NPY__HWCAP2_AES) || (hwcap2 & NPY__HWCAP2_SHA1) ||
+ (hwcap2 & NPY__HWCAP2_SHA2) || (hwcap2 & NPY__HWCAP2_PMULL) ||
+ (hwcap2 & NPY__HWCAP2_CRC32))
+#else
+ if (1)
+#endif
+ {
+ if (!(hwcap & (NPY__HWCAP_FP | NPY__HWCAP_ASIMD))) {
+ // Could this even happen? It may have been disabled by the kernel.
+ // BTW this will break the baseline of AARCH64
+ return 1;
+ }
+ npy__cpu_have[NPY_CPU_FEATURE_FPHP] = (hwcap & NPY__HWCAP_FPHP) != 0;
+ npy__cpu_have[NPY_CPU_FEATURE_ASIMDHP] = (hwcap & NPY__HWCAP_ASIMDHP) != 0;
+ npy__cpu_have[NPY_CPU_FEATURE_ASIMDDP] = (hwcap & NPY__HWCAP_ASIMDDP) != 0;
+ npy__cpu_have[NPY_CPU_FEATURE_ASIMDFHM] = (hwcap & NPY__HWCAP_ASIMDFHM) != 0;
+ npy__cpu_init_features_arm8();
+ } else {
+ npy__cpu_have[NPY_CPU_FEATURE_NEON] = (hwcap & NPY__HWCAP_NEON) != 0;
+ npy__cpu_have[NPY_CPU_FEATURE_NEON_FP16] = (hwcap & (NPY__HWCAP_NEON | NPY__HWCAP_VFPv3 |
+ NPY__HWCAP_HALF)) != 0;
+ npy__cpu_have[NPY_CPU_FEATURE_NEON_VFPV4] = (hwcap & (NPY__HWCAP_NEON | NPY__HWCAP_VFPv4)) != 0;
+ }
+ return 1;
+}
+#endif
+
+static void
+npy__cpu_init_features(void)
+{
+ memset(npy__cpu_have, 0, sizeof(npy__cpu_have[0]) * NPY_CPU_FEATURE_MAX);
+#ifdef __linux__
+ if (npy__cpu_init_features_linux())
+ return;
+#endif
+ // We have nothing else to do
+#if defined(NPY_HAVE_NEON_ARM8) || defined(__aarch64__) || (defined(__ARM_ARCH) && __ARM_ARCH >= 8)
+ #if defined(NPY_HAVE_FPHP) || defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+ npy__cpu_have[NPY_CPU_FEATURE_FPHP] = 1;
+ #endif
+ #if defined(NPY_HAVE_ASIMDHP) || defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+ npy__cpu_have[NPY_CPU_FEATURE_ASIMDHP] = 1;
+ #endif
+ #if defined(NPY_HAVE_ASIMDDP) || defined(__ARM_FEATURE_DOTPROD)
+ npy__cpu_have[NPY_CPU_FEATURE_ASIMDDP] = 1;
+ #endif
+ #if defined(NPY_HAVE_ASIMDFHM) || defined(__ARM_FEATURE_FP16FML)
+ npy__cpu_have[NPY_CPU_FEATURE_ASIMDFHM] = 1;
+ #endif
+ npy__cpu_init_features_arm8();
+#else
+ #if defined(NPY_HAVE_NEON) || defined(__ARM_NEON__)
+ npy__cpu_have[NPY_CPU_FEATURE_NEON] = 1;
+ #endif
+ #if defined(NPY_HAVE_NEON_FP16) || defined(__ARM_FP16_FORMAT_IEEE) || (defined(__ARM_FP) && (__ARM_FP & 2))
+ npy__cpu_have[NPY_CPU_FEATURE_NEON_FP16] = npy__cpu_have[NPY_CPU_FEATURE_NEON];
+ #endif
+ #if defined(NPY_HAVE_NEON_VFPV4) || defined(__ARM_FEATURE_FMA)
+ npy__cpu_have[NPY_CPU_FEATURE_NEON_VFPV4] = npy__cpu_have[NPY_CPU_FEATURE_NEON];
+ #endif
+#endif
+}
+
+/*********** Unsupported ARCH ***********/
+#else
+static void
+npy__cpu_init_features(void)
+{
+}
+#endif
diff --git a/numpy/core/src/common/npy_cpu_features.h b/numpy/core/src/common/npy_cpu_features.h
new file mode 100644
index 000000000..0e8901328
--- /dev/null
+++ b/numpy/core/src/common/npy_cpu_features.h
@@ -0,0 +1,117 @@
+#ifndef _NPY_CPU_FEATURES_H_
+#define _NPY_CPU_FEATURES_H_
+
+#include "numpy/numpyconfig.h" // for NPY_VISIBILITY_HIDDEN
+#include <Python.h> // for PyObject
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+enum npy_cpu_features
+{
+ NPY_CPU_FEATURE_NONE = 0,
+ // X86
+ NPY_CPU_FEATURE_MMX = 1,
+ NPY_CPU_FEATURE_SSE = 2,
+ NPY_CPU_FEATURE_SSE2 = 3,
+ NPY_CPU_FEATURE_SSE3 = 4,
+ NPY_CPU_FEATURE_SSSE3 = 5,
+ NPY_CPU_FEATURE_SSE41 = 6,
+ NPY_CPU_FEATURE_POPCNT = 7,
+ NPY_CPU_FEATURE_SSE42 = 8,
+ NPY_CPU_FEATURE_AVX = 9,
+ NPY_CPU_FEATURE_F16C = 10,
+ NPY_CPU_FEATURE_XOP = 11,
+ NPY_CPU_FEATURE_FMA4 = 12,
+ NPY_CPU_FEATURE_FMA3 = 13,
+ NPY_CPU_FEATURE_AVX2 = 14,
+ NPY_CPU_FEATURE_FMA = 15, // AVX2 & FMA3, provides backward compatibility
+
+ NPY_CPU_FEATURE_AVX512F = 30,
+ NPY_CPU_FEATURE_AVX512CD = 31,
+ NPY_CPU_FEATURE_AVX512ER = 32,
+ NPY_CPU_FEATURE_AVX512PF = 33,
+ NPY_CPU_FEATURE_AVX5124FMAPS = 34,
+ NPY_CPU_FEATURE_AVX5124VNNIW = 35,
+ NPY_CPU_FEATURE_AVX512VPOPCNTDQ = 36,
+ NPY_CPU_FEATURE_AVX512BW = 37,
+ NPY_CPU_FEATURE_AVX512DQ = 38,
+ NPY_CPU_FEATURE_AVX512VL = 39,
+ NPY_CPU_FEATURE_AVX512IFMA = 40,
+ NPY_CPU_FEATURE_AVX512VBMI = 41,
+ NPY_CPU_FEATURE_AVX512VNNI = 42,
+ NPY_CPU_FEATURE_AVX512VBMI2 = 43,
+ NPY_CPU_FEATURE_AVX512BITALG = 44,
+
+ // X86 CPU Groups
+ // Knights Landing (F,CD,ER,PF)
+ NPY_CPU_FEATURE_AVX512_KNL = 101,
+ // Knights Mill (F,CD,ER,PF,4FMAPS,4VNNIW,VPOPCNTDQ)
+ NPY_CPU_FEATURE_AVX512_KNM = 102,
+ // Skylake-X (F,CD,BW,DQ,VL)
+ NPY_CPU_FEATURE_AVX512_SKX = 103,
+ // Cascade Lake (F,CD,BW,DQ,VL,VNNI)
+ NPY_CPU_FEATURE_AVX512_CLX = 104,
+ // Cannon Lake (F,CD,BW,DQ,VL,IFMA,VBMI)
+ NPY_CPU_FEATURE_AVX512_CNL = 105,
+ // Ice Lake (F,CD,BW,DQ,VL,IFMA,VBMI,VNNI,VBMI2,BITALG,VPOPCNTDQ)
+ NPY_CPU_FEATURE_AVX512_ICL = 106,
+
+ // IBM/POWER VSX
+ // POWER7
+ NPY_CPU_FEATURE_VSX = 200,
+ // POWER8
+ NPY_CPU_FEATURE_VSX2 = 201,
+ // POWER9
+ NPY_CPU_FEATURE_VSX3 = 202,
+
+ // ARM
+ NPY_CPU_FEATURE_NEON = 300,
+ NPY_CPU_FEATURE_NEON_FP16 = 301,
+ // FMA
+ NPY_CPU_FEATURE_NEON_VFPV4 = 302,
+ // Advanced SIMD
+ NPY_CPU_FEATURE_ASIMD = 303,
+ // ARMv8.2 half-precision
+ NPY_CPU_FEATURE_FPHP = 304,
+ // ARMv8.2 half-precision vector arithm
+ NPY_CPU_FEATURE_ASIMDHP = 305,
+ // ARMv8.2 dot product
+ NPY_CPU_FEATURE_ASIMDDP = 306,
+ // ARMv8.2 single&half-precision multiply
+ NPY_CPU_FEATURE_ASIMDFHM = 307,
+
+ NPY_CPU_FEATURE_MAX
+};
+
+/*
+ * Initialize CPU features
+ * return 0 on success, otherwise return -1
+*/
+NPY_VISIBILITY_HIDDEN int
+npy_cpu_init(void);
+
+/*
+ * return 0 if CPU feature isn't available
+ * note: `npy_cpu_init` must be called first; otherwise it will always return 0
+*/
+NPY_VISIBILITY_HIDDEN int
+npy_cpu_have(int feature_id);
+
+#define NPY_CPU_HAVE(FEATURE_NAME) \
+npy_cpu_have(NPY_CPU_FEATURE_##FEATURE_NAME)
+
+/*
+ * return a new dictionary containing CPU feature names
+ * with runtime availability.
+ * same as npy_cpu_have, `npy_cpu_init` must be called first.
+ */
+NPY_VISIBILITY_HIDDEN PyObject *
+npy_cpu_features_dict(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // _NPY_CPU_FEATURES_H_
diff --git a/numpy/core/src/common/npy_longdouble.c b/numpy/core/src/common/npy_longdouble.c
index c580e0cce..260e02a64 100644
--- a/numpy/core/src/common/npy_longdouble.c
+++ b/numpy/core/src/common/npy_longdouble.c
@@ -100,16 +100,12 @@ done:
static PyObject *
_PyLong_Bytes(PyObject *long_obj) {
PyObject *bytes;
-#if defined(NPY_PY3K)
PyObject *unicode = PyObject_Str(long_obj);
if (unicode == NULL) {
return NULL;
}
bytes = PyUnicode_AsUTF8String(unicode);
Py_DECREF(unicode);
-#else
- bytes = PyObject_Str(long_obj);
-#endif
return bytes;
}
diff --git a/numpy/core/src/common/numpyos.c b/numpy/core/src/common/numpyos.c
index d60b1ca17..7a629f46f 100644
--- a/numpy/core/src/common/numpyos.c
+++ b/numpy/core/src/common/numpyos.c
@@ -283,7 +283,7 @@ fix_ascii_format(char* buf, size_t buflen, int decimal)
* converting.
* - value: The value to convert
* - decimal: if != 0, always has a decimal, and at leasat one digit after
- * the decimal. This has the same effect as passing 'Z' in the origianl
+ * the decimal. This has the same effect as passing 'Z' in the original
* PyOS_ascii_formatd
*
* This is similar to PyOS_ascii_formatd in python > 2.6, except that it does
diff --git a/numpy/core/src/common/python_xerbla.c b/numpy/core/src/common/python_xerbla.c
index bdf0b9058..fe2f718b2 100644
--- a/numpy/core/src/common/python_xerbla.c
+++ b/numpy/core/src/common/python_xerbla.c
@@ -1,10 +1,6 @@
#include "Python.h"
-
-/*
- * From f2c.h, this should be safe unless fortran is set to use 64
- * bit integers. We don't seem to have any good way to detect that.
- */
-typedef int integer;
+#include "numpy/npy_common.h"
+#include "npy_cblas.h"
/*
From the original manpage:
@@ -23,7 +19,7 @@ typedef int integer;
info: Number of the invalid parameter.
*/
-int xerbla_(char *srname, integer *info)
+CBLAS_INT BLAS_FUNC(xerbla)(char *srname, CBLAS_INT *info)
{
static const char format[] = "On entry to %.*s" \
" parameter number %d had an illegal value";
@@ -41,7 +37,7 @@ int xerbla_(char *srname, integer *info)
#ifdef WITH_THREAD
save = PyGILState_Ensure();
#endif
- PyOS_snprintf(buf, sizeof(buf), format, len, srname, *info);
+ PyOS_snprintf(buf, sizeof(buf), format, len, srname, (int)*info);
PyErr_SetString(PyExc_ValueError, buf);
#ifdef WITH_THREAD
PyGILState_Release(save);
diff --git a/numpy/core/src/common/ucsnarrow.c b/numpy/core/src/common/ucsnarrow.c
index 8e293e9f2..3ef5d6878 100644
--- a/numpy/core/src/common/ucsnarrow.c
+++ b/numpy/core/src/common/ucsnarrow.c
@@ -16,76 +16,12 @@
#include "ctors.h"
/*
- * Functions only needed on narrow builds of Python for converting back and
- * forth between the NumPy Unicode data-type (always 4-bytes) and the
- * Python Unicode scalar (2-bytes on a narrow build).
- */
-
-/*
- * The ucs2 buffer must be large enough to hold 2*ucs4length characters
- * due to the use of surrogate pairs.
+ * This file originally contained functions only needed on narrow builds of
+ * Python for converting back and forth between the NumPy Unicode data-type
+ * (always 4-bytes) and the Python Unicode scalar (2-bytes on a narrow build).
*
- * The return value is the number of ucs2 bytes used-up which
- * is ucs4length + number of surrogate pairs found.
- *
- * Values above 0xffff are converted to surrogate pairs.
+ * This "narrow" interface is now deprecated in python and unused in NumPy.
*/
-NPY_NO_EXPORT int
-PyUCS2Buffer_FromUCS4(Py_UNICODE *ucs2, npy_ucs4 *ucs4, int ucs4length)
-{
- int i;
- int numucs2 = 0;
- npy_ucs4 chr;
- for (i = 0; i < ucs4length; i++) {
- chr = *ucs4++;
- if (chr > 0xffff) {
- numucs2++;
- chr -= 0x10000L;
- *ucs2++ = 0xD800 + (Py_UNICODE) (chr >> 10);
- *ucs2++ = 0xDC00 + (Py_UNICODE) (chr & 0x03FF);
- }
- else {
- *ucs2++ = (Py_UNICODE) chr;
- }
- numucs2++;
- }
- return numucs2;
-}
-
-
-/*
- * This converts a UCS2 buffer of the given length to UCS4 buffer.
- * It converts up to ucs4len characters of UCS2
- *
- * It returns the number of characters converted which can
- * be less than ucs2len if there are surrogate pairs in ucs2.
- *
- * The return value is the actual size of the used part of the ucs4 buffer.
- */
-NPY_NO_EXPORT int
-PyUCS2Buffer_AsUCS4(Py_UNICODE *ucs2, npy_ucs4 *ucs4, int ucs2len, int ucs4len)
-{
- int i;
- npy_ucs4 chr;
- Py_UNICODE ch;
- int numchars=0;
-
- for (i = 0; (i < ucs2len) && (numchars < ucs4len); i++) {
- ch = *ucs2++;
- if (ch >= 0xd800 && ch <= 0xdfff) {
- /* surrogate pair */
- chr = ((npy_ucs4)(ch-0xd800)) << 10;
- chr += *ucs2++ + 0x2400; /* -0xdc00 + 0x10000 */
- i++;
- }
- else {
- chr = (npy_ucs4) ch;
- }
- *ucs4++ = chr;
- numchars++;
- }
- return numchars;
-}
/*
* Returns a PyUnicodeObject initialized from a buffer containing
@@ -107,68 +43,32 @@ PyUCS2Buffer_AsUCS4(Py_UNICODE *ucs2, npy_ucs4 *ucs4, int ucs2len, int ucs4len)
* new_reference: PyUnicodeObject
*/
NPY_NO_EXPORT PyUnicodeObject *
-PyUnicode_FromUCS4(char *src, Py_ssize_t size, int swap, int align)
+PyUnicode_FromUCS4(char const *src_char, Py_ssize_t size, int swap, int align)
{
Py_ssize_t ucs4len = size / sizeof(npy_ucs4);
- npy_ucs4 *buf = (npy_ucs4 *)src;
- int alloc = 0;
- PyUnicodeObject *ret;
+ npy_ucs4 const *src = (npy_ucs4 const *)src_char;
+ npy_ucs4 *buf = NULL;
/* swap and align if needed */
if (swap || align) {
buf = (npy_ucs4 *)malloc(size);
if (buf == NULL) {
PyErr_NoMemory();
- goto fail;
+ return NULL;
}
- alloc = 1;
memcpy(buf, src, size);
if (swap) {
byte_swap_vector(buf, ucs4len, sizeof(npy_ucs4));
}
+ src = buf;
}
/* trim trailing zeros */
- while (ucs4len > 0 && buf[ucs4len - 1] == 0) {
+ while (ucs4len > 0 && src[ucs4len - 1] == 0) {
ucs4len--;
}
-
- /* produce PyUnicode object */
-#ifdef Py_UNICODE_WIDE
- {
- ret = (PyUnicodeObject *)PyUnicode_FromUnicode((Py_UNICODE*)buf,
- (Py_ssize_t) ucs4len);
- if (ret == NULL) {
- goto fail;
- }
- }
-#else
- {
- Py_ssize_t tmpsiz = 2 * sizeof(Py_UNICODE) * ucs4len;
- Py_ssize_t ucs2len;
- Py_UNICODE *tmp;
-
- if ((tmp = (Py_UNICODE *)malloc(tmpsiz)) == NULL) {
- PyErr_NoMemory();
- goto fail;
- }
- ucs2len = PyUCS2Buffer_FromUCS4(tmp, buf, ucs4len);
- ret = (PyUnicodeObject *)PyUnicode_FromUnicode(tmp, (Py_ssize_t) ucs2len);
- free(tmp);
- if (ret == NULL) {
- goto fail;
- }
- }
-#endif
-
- if (alloc) {
- free(buf);
- }
+ PyUnicodeObject *ret = (PyUnicodeObject *)PyUnicode_FromKindAndData(
+ PyUnicode_4BYTE_KIND, src, ucs4len);
+ free(buf);
return ret;
-
-fail:
- if (alloc) {
- free(buf);
- }
- return NULL;
}
diff --git a/numpy/core/src/common/ucsnarrow.h b/numpy/core/src/common/ucsnarrow.h
index fe31a5e25..c811e1f2c 100644
--- a/numpy/core/src/common/ucsnarrow.h
+++ b/numpy/core/src/common/ucsnarrow.h
@@ -1,12 +1,6 @@
#ifndef _NPY_UCSNARROW_H_
#define _NPY_UCSNARROW_H_
-NPY_NO_EXPORT int
-PyUCS2Buffer_FromUCS4(Py_UNICODE *ucs2, npy_ucs4 *ucs4, int ucs4length);
-
-NPY_NO_EXPORT int
-PyUCS2Buffer_AsUCS4(Py_UNICODE *ucs2, npy_ucs4 *ucs4, int ucs2len, int ucs4len);
-
NPY_NO_EXPORT PyUnicodeObject *
-PyUnicode_FromUCS4(char *src, Py_ssize_t size, int swap, int align);
+PyUnicode_FromUCS4(char const *src, Py_ssize_t size, int swap, int align);
diff --git a/numpy/core/src/common/ufunc_override.c b/numpy/core/src/common/ufunc_override.c
index 89f08a9cb..d510f185a 100644
--- a/numpy/core/src/common/ufunc_override.c
+++ b/numpy/core/src/common/ufunc_override.c
@@ -36,6 +36,9 @@ PyUFuncOverride_GetNonDefaultArrayUfunc(PyObject *obj)
*/
cls_array_ufunc = PyArray_LookupSpecial(obj, "__array_ufunc__");
if (cls_array_ufunc == NULL) {
+ if (PyErr_Occurred()) {
+ PyErr_Clear(); /* TODO[gh-14801]: propagate crashes during attribute access? */
+ }
return NULL;
}
/* Ignore if the same as ndarray.__array_ufunc__ */
@@ -91,8 +94,11 @@ PyUFuncOverride_GetOutObjects(PyObject *kwds, PyObject **out_kwd_obj, PyObject *
return -1;
}
/* borrowed reference */
- *out_kwd_obj = PyDict_GetItemString(kwds, "out");
+ *out_kwd_obj = _PyDict_GetItemStringWithError(kwds, "out");
if (*out_kwd_obj == NULL) {
+ if (PyErr_Occurred()) {
+ return -1;
+ }
Py_INCREF(Py_None);
*out_kwd_obj = Py_None;
return 0;
diff --git a/numpy/core/src/dummymodule.c b/numpy/core/src/dummymodule.c
index 718199f70..e26875736 100644
--- a/numpy/core/src/dummymodule.c
+++ b/numpy/core/src/dummymodule.c
@@ -16,7 +16,6 @@ static struct PyMethodDef methods[] = {
};
-#if defined(NPY_PY3K)
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
"dummy",
@@ -28,10 +27,8 @@ static struct PyModuleDef moduledef = {
NULL,
NULL
};
-#endif
/* Initialization function for the module */
-#if defined(NPY_PY3K)
PyMODINIT_FUNC PyInit__dummy(void) {
PyObject *m;
m = PyModule_Create(&moduledef);
@@ -40,9 +37,3 @@ PyMODINIT_FUNC PyInit__dummy(void) {
}
return m;
}
-#else
-PyMODINIT_FUNC
-init_dummy(void) {
- Py_InitModule("_dummy", methods);
-}
-#endif
diff --git a/numpy/core/src/multiarray/_datetime.h b/numpy/core/src/multiarray/_datetime.h
index 3db1254d4..20f7a132c 100644
--- a/numpy/core/src/multiarray/_datetime.h
+++ b/numpy/core/src/multiarray/_datetime.h
@@ -1,7 +1,7 @@
#ifndef _NPY_PRIVATE__DATETIME_H_
#define _NPY_PRIVATE__DATETIME_H_
-extern NPY_NO_EXPORT char *_datetime_strings[NPY_DATETIME_NUMUNITS];
+extern NPY_NO_EXPORT char const *_datetime_strings[NPY_DATETIME_NUMUNITS];
extern NPY_NO_EXPORT int _days_per_month_table[2][12];
NPY_NO_EXPORT void
@@ -68,7 +68,7 @@ days_to_month_number(npy_datetime days);
* Returns 0 on success, -1 on failure.
*/
NPY_NO_EXPORT int
-parse_datetime_metadata_from_metastr(char *metastr, Py_ssize_t len,
+parse_datetime_metadata_from_metastr(char const *metastr, Py_ssize_t len,
PyArray_DatetimeMetaData *out_meta);
@@ -78,7 +78,7 @@ parse_datetime_metadata_from_metastr(char *metastr, Py_ssize_t len,
* contain its string length.
*/
NPY_NO_EXPORT PyArray_Descr *
-parse_dtype_from_datetime_typestr(char *typestr, Py_ssize_t len);
+parse_dtype_from_datetime_typestr(char const *typestr, Py_ssize_t len);
/*
* Converts a substring given by 'str' and 'len' into
@@ -88,7 +88,7 @@ parse_dtype_from_datetime_typestr(char *typestr, Py_ssize_t len);
* Returns 0 on success, -1 on failure.
*/
NPY_NO_EXPORT NPY_DATETIMEUNIT
-parse_datetime_unit_from_string(char *str, Py_ssize_t len, char *metastr);
+parse_datetime_unit_from_string(char const *str, Py_ssize_t len, char const *metastr);
/*
* Translate divisors into multiples of smaller units.
@@ -99,7 +99,7 @@ parse_datetime_unit_from_string(char *str, Py_ssize_t len, char *metastr);
*/
NPY_NO_EXPORT int
convert_datetime_divisor_to_multiple(PyArray_DatetimeMetaData *meta,
- int den, char *metastr);
+ int den, char const *metastr);
/*
* Determines whether the 'divisor' metadata divides evenly into
diff --git a/numpy/core/src/multiarray/_multiarray_tests.c.src b/numpy/core/src/multiarray/_multiarray_tests.c.src
index fa2efb428..ec2928c8f 100644
--- a/numpy/core/src/multiarray/_multiarray_tests.c.src
+++ b/numpy/core/src/multiarray/_multiarray_tests.c.src
@@ -55,7 +55,7 @@ EXPORT(void*) forward_pointer(void *x)
* #typenum = NPY_DOUBLE, NPY_INT#
*/
static int copy_@name@(PyArrayIterObject *itx, PyArrayNeighborhoodIterObject *niterx,
- npy_intp *bounds,
+ npy_intp const *bounds,
PyObject **out)
{
npy_intp i, j;
@@ -97,7 +97,7 @@ static int copy_@name@(PyArrayIterObject *itx, PyArrayNeighborhoodIterObject *ni
/**end repeat**/
static int copy_object(PyArrayIterObject *itx, PyArrayNeighborhoodIterObject *niterx,
- npy_intp *bounds,
+ npy_intp const *bounds,
PyObject **out)
{
npy_intp i, j;
@@ -251,7 +251,7 @@ clean_ax:
static int
copy_double_double(PyArrayNeighborhoodIterObject *itx,
PyArrayNeighborhoodIterObject *niterx,
- npy_intp *bounds,
+ npy_intp const *bounds,
PyObject **out)
{
npy_intp i, j;
@@ -771,30 +771,6 @@ npy_discard(PyObject* NPY_UNUSED(self), PyObject* args)
Py_RETURN_NONE;
}
-#if !defined(NPY_PY3K)
-static PyObject *
-int_subclass(PyObject *dummy, PyObject *args)
-{
-
- PyObject *result = NULL;
- PyObject *scalar_object = NULL;
-
- if (!PyArg_UnpackTuple(args, "test_int_subclass", 1, 1, &scalar_object))
- return NULL;
-
- if (PyInt_Check(scalar_object))
- result = Py_True;
- else
- result = Py_False;
-
- Py_INCREF(result);
-
- return result;
-
-}
-#endif
-
-
/*
* Create python string from a FLAG and or the corresponding PyBuf flag
* for the use in get_buffer_info.
@@ -1210,11 +1186,7 @@ array_solve_diophantine(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject
for (j = 0; j < nterms; ++j) {
PyObject *obj;
-#if defined(NPY_PY3K)
obj = PyLong_FromSsize_t(x[j]);
-#else
- obj = PyInt_FromSsize_t(x[j]);
-#endif
if (obj == NULL) {
goto fail;
}
@@ -1905,21 +1877,21 @@ PrintFloat_Printf_g(PyObject *obj, int precision)
char str[1024];
if (PyArray_IsScalar(obj, Half)) {
- npy_half x = ((PyHalfScalarObject *)obj)->obval;
+ npy_half x = PyArrayScalar_VAL(obj, Half);
PyOS_snprintf(str, sizeof(str), "%.*g", precision,
npy_half_to_double(x));
}
else if (PyArray_IsScalar(obj, Float)) {
- npy_float x = ((PyFloatScalarObject *)obj)->obval;
+ npy_float x = PyArrayScalar_VAL(obj, Float);
PyOS_snprintf(str, sizeof(str), "%.*g", precision, x);
}
else if (PyArray_IsScalar(obj, Double)) {
- npy_double x = ((PyDoubleScalarObject *)obj)->obval;
+ npy_double x = PyArrayScalar_VAL(obj, Double);
PyOS_snprintf(str, sizeof(str), "%.*g", precision, x);
/* would be better to use lg, but not available in C90 */
}
else if (PyArray_IsScalar(obj, LongDouble)) {
- npy_longdouble x = ((PyLongDoubleScalarObject *)obj)->obval;
+ npy_longdouble x = PyArrayScalar_VAL(obj, LongDouble);
PyOS_snprintf(str, sizeof(str), "%.*Lg", precision, x);
}
else{
@@ -2018,11 +1990,6 @@ static PyMethodDef Multiarray_TestsMethods[] = {
{"npy_discard",
npy_discard,
METH_O, NULL},
-#if !defined(NPY_PY3K)
- {"test_int_subclass",
- int_subclass,
- METH_VARARGS, NULL},
-#endif
{"get_buffer_info",
get_buffer_info,
METH_VARARGS, NULL},
@@ -2126,7 +2093,6 @@ static PyMethodDef Multiarray_TestsMethods[] = {
};
-#if defined(NPY_PY3K)
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
"_multiarray_tests",
@@ -2138,33 +2104,21 @@ static struct PyModuleDef moduledef = {
NULL,
NULL
};
-#endif
-#if defined(NPY_PY3K)
-#define RETVAL m
PyMODINIT_FUNC PyInit__multiarray_tests(void)
-#else
-#define RETVAL
-PyMODINIT_FUNC
-init_multiarray_tests(void)
-#endif
{
PyObject *m;
-#if defined(NPY_PY3K)
m = PyModule_Create(&moduledef);
-#else
- m = Py_InitModule("_multiarray_tests", Multiarray_TestsMethods);
-#endif
if (m == NULL) {
- return RETVAL;
+ return m;
}
import_array();
if (PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"cannot load _multiarray_tests module.");
}
- return RETVAL;
+ return m;
}
NPY_NO_EXPORT int
diff --git a/numpy/core/src/multiarray/alloc.c b/numpy/core/src/multiarray/alloc.c
index a7f34cbe5..c2b7e9ca7 100644
--- a/numpy/core/src/multiarray/alloc.c
+++ b/numpy/core/src/multiarray/alloc.c
@@ -48,11 +48,6 @@ static cache_bucket datacache[NBUCKETS];
static cache_bucket dimcache[NBUCKETS_DIM];
/* as the cache is managed in global variables verify the GIL is held */
-#if defined(NPY_PY3K)
-#define NPY_CHECK_GIL_HELD() PyGILState_Check()
-#else
-#define NPY_CHECK_GIL_HELD() 1
-#endif
/*
* very simplistic small memory block cache to avoid more expensive libc
@@ -67,7 +62,7 @@ _npy_alloc_cache(npy_uintp nelem, npy_uintp esz, npy_uint msz,
void * p;
assert((esz == 1 && cache == datacache) ||
(esz == sizeof(npy_intp) && cache == dimcache));
- assert(NPY_CHECK_GIL_HELD());
+ assert(PyGILState_Check());
if (nelem < msz) {
if (cache[nelem].available > 0) {
return cache[nelem].ptrs[--(cache[nelem].available)];
@@ -102,7 +97,7 @@ static NPY_INLINE void
_npy_free_cache(void * p, npy_uintp nelem, npy_uint msz,
cache_bucket * cache, void (*dealloc)(void *))
{
- assert(NPY_CHECK_GIL_HELD());
+ assert(PyGILState_Check());
if (p != NULL && nelem < msz) {
if (cache[nelem].available < NCACHE) {
cache[nelem].ptrs[cache[nelem].available++] = p;
diff --git a/numpy/core/src/multiarray/array_assign_array.c b/numpy/core/src/multiarray/array_assign_array.c
index 7ff33ebd7..e40b6c719 100644
--- a/numpy/core/src/multiarray/array_assign_array.c
+++ b/numpy/core/src/multiarray/array_assign_array.c
@@ -29,8 +29,8 @@
* elements, as required by the copy/casting code in lowlevel_strided_loops.c
*/
NPY_NO_EXPORT int
-copycast_isaligned(int ndim, npy_intp *shape,
- PyArray_Descr *dtype, char *data, npy_intp *strides)
+copycast_isaligned(int ndim, npy_intp const *shape,
+ PyArray_Descr *dtype, char *data, npy_intp const *strides)
{
int aligned;
int big_aln, small_aln;
@@ -72,9 +72,9 @@ copycast_isaligned(int ndim, npy_intp *shape,
* Returns 0 on success, -1 on failure.
*/
NPY_NO_EXPORT int
-raw_array_assign_array(int ndim, npy_intp *shape,
- PyArray_Descr *dst_dtype, char *dst_data, npy_intp *dst_strides,
- PyArray_Descr *src_dtype, char *src_data, npy_intp *src_strides)
+raw_array_assign_array(int ndim, npy_intp const *shape,
+ PyArray_Descr *dst_dtype, char *dst_data, npy_intp const *dst_strides,
+ PyArray_Descr *src_dtype, char *src_data, npy_intp const *src_strides)
{
int idim;
npy_intp shape_it[NPY_MAXDIMS];
@@ -152,11 +152,11 @@ raw_array_assign_array(int ndim, npy_intp *shape,
* Returns 0 on success, -1 on failure.
*/
NPY_NO_EXPORT int
-raw_array_wheremasked_assign_array(int ndim, npy_intp *shape,
- PyArray_Descr *dst_dtype, char *dst_data, npy_intp *dst_strides,
- PyArray_Descr *src_dtype, char *src_data, npy_intp *src_strides,
+raw_array_wheremasked_assign_array(int ndim, npy_intp const *shape,
+ PyArray_Descr *dst_dtype, char *dst_data, npy_intp const *dst_strides,
+ PyArray_Descr *src_dtype, char *src_data, npy_intp const *src_strides,
PyArray_Descr *wheremask_dtype, char *wheremask_data,
- npy_intp *wheremask_strides)
+ npy_intp const *wheremask_strides)
{
int idim;
npy_intp shape_it[NPY_MAXDIMS];
diff --git a/numpy/core/src/multiarray/array_assign_scalar.c b/numpy/core/src/multiarray/array_assign_scalar.c
index ecb5be47b..6bc9bcfee 100644
--- a/numpy/core/src/multiarray/array_assign_scalar.c
+++ b/numpy/core/src/multiarray/array_assign_scalar.c
@@ -30,8 +30,8 @@
* Returns 0 on success, -1 on failure.
*/
NPY_NO_EXPORT int
-raw_array_assign_scalar(int ndim, npy_intp *shape,
- PyArray_Descr *dst_dtype, char *dst_data, npy_intp *dst_strides,
+raw_array_assign_scalar(int ndim, npy_intp const *shape,
+ PyArray_Descr *dst_dtype, char *dst_data, npy_intp const *dst_strides,
PyArray_Descr *src_dtype, char *src_data)
{
int idim;
@@ -101,11 +101,11 @@ raw_array_assign_scalar(int ndim, npy_intp *shape,
* Returns 0 on success, -1 on failure.
*/
NPY_NO_EXPORT int
-raw_array_wheremasked_assign_scalar(int ndim, npy_intp *shape,
- PyArray_Descr *dst_dtype, char *dst_data, npy_intp *dst_strides,
+raw_array_wheremasked_assign_scalar(int ndim, npy_intp const *shape,
+ PyArray_Descr *dst_dtype, char *dst_data, npy_intp const *dst_strides,
PyArray_Descr *src_dtype, char *src_data,
PyArray_Descr *wheremask_dtype, char *wheremask_data,
- npy_intp *wheremask_strides)
+ npy_intp const *wheremask_strides)
{
int idim;
npy_intp shape_it[NPY_MAXDIMS], dst_strides_it[NPY_MAXDIMS];
diff --git a/numpy/core/src/multiarray/arrayfunction_override.c b/numpy/core/src/multiarray/arrayfunction_override.c
index 62e597764..9ea8efdd9 100644
--- a/numpy/core/src/multiarray/arrayfunction_override.c
+++ b/numpy/core/src/multiarray/arrayfunction_override.c
@@ -26,6 +26,7 @@ static PyObject *
get_array_function(PyObject *obj)
{
static PyObject *ndarray_array_function = NULL;
+ PyObject *array_function;
if (ndarray_array_function == NULL) {
ndarray_array_function = get_ndarray_array_function();
@@ -37,7 +38,12 @@ get_array_function(PyObject *obj)
return ndarray_array_function;
}
- return PyArray_LookupSpecial(obj, "__array_function__");
+ array_function = PyArray_LookupSpecial(obj, "__array_function__");
+ if (array_function == NULL && PyErr_Occurred()) {
+ PyErr_Clear(); /* TODO[gh-14801]: propagate crashes during attribute access? */
+ }
+
+ return array_function;
}
diff --git a/numpy/core/src/multiarray/arrayobject.c b/numpy/core/src/multiarray/arrayobject.c
index 4e229e321..dedaf38eb 100644
--- a/numpy/core/src/multiarray/arrayobject.c
+++ b/numpy/core/src/multiarray/arrayobject.c
@@ -41,6 +41,7 @@ maintainer email: oliphant.travis@ieee.org
#include "arraytypes.h"
#include "scalartypes.h"
#include "arrayobject.h"
+#include "conversion_utils.h"
#include "ctors.h"
#include "methods.h"
#include "descriptor.h"
@@ -48,7 +49,7 @@ maintainer email: oliphant.travis@ieee.org
#include "mapping.h"
#include "getset.h"
#include "sequence.h"
-#include "buffer.h"
+#include "npy_buffer.h"
#include "array_assign.h"
#include "alloc.h"
#include "mem_overlap.h"
@@ -278,8 +279,8 @@ PyArray_CopyObject(PyArrayObject *dest, PyObject *src_object)
* Get either an array object we can copy from, or its parameters
* if there isn't a convenient array available.
*/
- if (PyArray_GetArrayParamsFromObject(src_object, PyArray_DESCR(dest),
- 0, &dtype, &ndim, dims, &src, NULL) < 0) {
+ if (PyArray_GetArrayParamsFromObject_int(src_object,
+ PyArray_DESCR(dest), 0, &dtype, &ndim, dims, &src) < 0) {
Py_DECREF(src_object);
return -1;
}
@@ -388,7 +389,7 @@ PyArray_CopyObject(PyArrayObject *dest, PyObject *src_object)
/*NUMPY_API
*/
NPY_NO_EXPORT int
-PyArray_TypeNumFromName(char *str)
+PyArray_TypeNumFromName(char const *str)
{
int i;
PyArray_Descr *descr;
@@ -557,7 +558,7 @@ PyArray_DebugPrint(PyArrayObject *obj)
printf(" ndim : %d\n", fobj->nd);
printf(" shape :");
for (i = 0; i < fobj->nd; ++i) {
- printf(" %d", (int)fobj->dimensions[i]);
+ printf(" %" NPY_INTP_FMT, fobj->dimensions[i]);
}
printf("\n");
@@ -567,7 +568,7 @@ PyArray_DebugPrint(PyArrayObject *obj)
printf(" data : %p\n", fobj->data);
printf(" strides:");
for (i = 0; i < fobj->nd; ++i) {
- printf(" %d", (int)fobj->strides[i]);
+ printf(" %" NPY_INTP_FMT, fobj->strides[i]);
}
printf("\n");
@@ -614,7 +615,7 @@ PyArray_SetDatetimeParseFunction(PyObject *NPY_UNUSED(op))
/*NUMPY_API
*/
NPY_NO_EXPORT int
-PyArray_CompareUCS4(npy_ucs4 *s1, npy_ucs4 *s2, size_t len)
+PyArray_CompareUCS4(npy_ucs4 const *s1, npy_ucs4 const *s2, size_t len)
{
npy_ucs4 c1, c2;
while(len-- > 0) {
@@ -703,35 +704,40 @@ PyArray_FailUnlessWriteable(PyArrayObject *obj, const char *name)
If they are NULL terminated, then stop comparison.
*/
static int
-_myunincmp(npy_ucs4 *s1, npy_ucs4 *s2, int len1, int len2)
+_myunincmp(npy_ucs4 const *s1, npy_ucs4 const *s2, int len1, int len2)
{
- npy_ucs4 *sptr;
- npy_ucs4 *s1t=s1, *s2t=s2;
+ npy_ucs4 const *sptr;
+ npy_ucs4 *s1t = NULL;
+ npy_ucs4 *s2t = NULL;
int val;
npy_intp size;
int diff;
+ /* Replace `s1` and `s2` with aligned copies if needed */
if ((npy_intp)s1 % sizeof(npy_ucs4) != 0) {
size = len1*sizeof(npy_ucs4);
s1t = malloc(size);
memcpy(s1t, s1, size);
+ s1 = s1t;
}
if ((npy_intp)s2 % sizeof(npy_ucs4) != 0) {
size = len2*sizeof(npy_ucs4);
s2t = malloc(size);
memcpy(s2t, s2, size);
+        s2 = s2t;
}
- val = PyArray_CompareUCS4(s1t, s2t, PyArray_MIN(len1,len2));
+
+ val = PyArray_CompareUCS4(s1, s2, PyArray_MIN(len1,len2));
if ((val != 0) || (len1 == len2)) {
goto finish;
}
if (len2 > len1) {
- sptr = s2t+len1;
+ sptr = s2+len1;
val = -1;
diff = len2-len1;
}
else {
- sptr = s1t+len2;
+ sptr = s1+len2;
val = 1;
diff=len1-len2;
}
@@ -744,10 +750,11 @@ _myunincmp(npy_ucs4 *s1, npy_ucs4 *s2, int len1, int len2)
val = 0;
finish:
- if (s1t != s1) {
+ /* Cleanup the aligned copies */
+ if (s1t) {
free(s1t);
}
- if (s2t != s2) {
+ if (s2t) {
free(s2t);
}
return val;
@@ -763,9 +770,9 @@ _myunincmp(npy_ucs4 *s1, npy_ucs4 *s2, int len1, int len2)
* If they are NULL terminated, then stop comparison.
*/
static int
-_mystrncmp(char *s1, char *s2, int len1, int len2)
+_mystrncmp(char const *s1, char const *s2, int len1, int len2)
{
- char *sptr;
+ char const *sptr;
int val;
int diff;
@@ -827,7 +834,7 @@ static void _unistripw(npy_ucs4 *s, int n)
static char *
-_char_copy_n_strip(char *original, char *temp, int nc)
+_char_copy_n_strip(char const *original, char *temp, int nc)
{
if (nc > SMALL_STRING) {
temp = malloc(nc);
@@ -850,7 +857,7 @@ _char_release(char *ptr, int nc)
}
static char *
-_uni_copy_n_strip(char *original, char *temp, int nc)
+_uni_copy_n_strip(char const *original, char *temp, int nc)
{
if (nc*sizeof(npy_ucs4) > SMALL_STRING) {
temp = malloc(nc*sizeof(npy_ucs4));
@@ -919,7 +926,7 @@ _compare_strings(PyArrayObject *result, PyArrayMultiIterObject *multi,
int N1, N2;
int (*compfunc)(void *, void *, int, int);
void (*relfunc)(char *, int);
- char* (*stripfunc)(char *, char *, int);
+ char* (*stripfunc)(char const *, char *, int);
compfunc = func;
dptr = (npy_bool *)PyArray_DATA(result);
@@ -998,22 +1005,18 @@ _strings_richcompare(PyArrayObject *self, PyArrayObject *other, int cmp_op,
{
PyArrayObject *result;
PyArrayMultiIterObject *mit;
- int val, cast = 0;
+ int val;
/* Cast arrays to a common type */
if (PyArray_TYPE(self) != PyArray_DESCR(other)->type_num) {
-#if defined(NPY_PY3K)
/*
* Comparison between Bytes and Unicode is not defined in Py3K;
* we follow.
*/
Py_INCREF(Py_NotImplemented);
return Py_NotImplemented;
-#else
- cast = 1;
-#endif /* define(NPY_PY3K) */
}
- if (cast || (PyArray_ISNOTSWAPPED(self) != PyArray_ISNOTSWAPPED(other))) {
+ if (PyArray_ISNOTSWAPPED(self) != PyArray_ISNOTSWAPPED(other)) {
PyObject *new;
if (PyArray_TYPE(self) == NPY_STRING &&
PyArray_DESCR(other)->type_num == NPY_UNICODE) {
@@ -1121,7 +1124,7 @@ _void_compare(PyArrayObject *self, PyArrayObject *other, int cmp_op)
op = (cmp_op == Py_EQ ? n_ops.logical_and : n_ops.logical_or);
while (PyDict_Next(PyArray_DESCR(self)->fields, &pos, &key, &value)) {
- if NPY_TITLE_KEY(key, value) {
+ if (NPY_TITLE_KEY(key, value)) {
continue;
}
a = array_subscript_asarray(self, key);
@@ -1327,18 +1330,9 @@ _failed_comparison_workaround(PyArrayObject *self, PyObject *other, int cmp_op)
/*
* For LE, LT, GT, GE and a flexible self or other, we return
* NotImplemented, which is the correct answer since the ufuncs do
- * not in fact implement loops for those. On python 3 this will
- * get us the desired TypeError, but on python 2, one gets strange
- * ordering, so we emit a warning.
+ * not in fact implement loops for those. This will get us the
+ * desired TypeError.
*/
-#if !defined(NPY_PY3K)
- /* 2015-05-14, 1.10 */
- if (DEPRECATE(
- "unorderable dtypes; returning scalar but in "
- "the future this will be an error") < 0) {
- goto fail;
- }
-#endif
Py_XDECREF(exc);
Py_XDECREF(val);
Py_XDECREF(tb);
@@ -1544,9 +1538,8 @@ array_richcompare(PyArrayObject *self, PyObject *other, int cmp_op)
* - If other is not convertible to an array, pass on the error
* (MHvK, 2018-06-18: not sure about this, but it's what we have).
*
- * However, for backwards compatibilty, we cannot yet return arrays,
- * so we raise warnings instead. Furthermore, we warn on python2
- * for LT, LE, GE, GT, since fall-back behaviour is poorly defined.
+ * However, for backwards compatibility, we cannot yet return arrays,
+ * so we raise warnings instead.
*/
result = _failed_comparison_workaround(self, other, cmp_op);
}
@@ -1601,7 +1594,7 @@ PyArray_ElementStrides(PyObject *obj)
/*NUMPY_API*/
NPY_NO_EXPORT npy_bool
PyArray_CheckStrides(int elsize, int nd, npy_intp numbytes, npy_intp offset,
- npy_intp *dims, npy_intp *newstrides)
+ npy_intp const *dims, npy_intp const *newstrides)
{
npy_intp begin, end;
npy_intp lower_offset;
@@ -1632,7 +1625,7 @@ array_new(PyTypeObject *subtype, PyObject *args, PyObject *kwds)
PyArray_Descr *descr = NULL;
int itemsize;
PyArray_Dims dims = {NULL, 0};
- PyArray_Dims strides = {NULL, 0};
+ PyArray_Dims strides = {NULL, -1};
PyArray_Chunk buffer;
npy_longlong offset = 0;
NPY_ORDER order = NPY_CORDER;
@@ -1653,7 +1646,7 @@ array_new(PyTypeObject *subtype, PyObject *args, PyObject *kwds)
PyArray_BufferConverter,
&buffer,
&offset,
- &PyArray_IntpConverter,
+ &PyArray_OptionalIntpConverter,
&strides,
&PyArray_OrderConverter,
&order)) {
@@ -1668,7 +1661,7 @@ array_new(PyTypeObject *subtype, PyObject *args, PyObject *kwds)
itemsize = descr->elsize;
- if (strides.ptr != NULL) {
+ if (strides.len != -1) {
npy_intp nb, off;
if (strides.len != dims.len) {
PyErr_SetString(PyExc_ValueError,
@@ -1788,71 +1781,29 @@ array_free(PyObject * v)
NPY_NO_EXPORT PyTypeObject PyArray_Type = {
-#if defined(NPY_PY3K)
PyVarObject_HEAD_INIT(NULL, 0)
-#else
- PyObject_HEAD_INIT(NULL)
- 0, /* ob_size */
-#endif
- "numpy.ndarray", /* tp_name */
- NPY_SIZEOF_PYARRAYOBJECT, /* tp_basicsize */
- 0, /* tp_itemsize */
+ .tp_name = "numpy.ndarray",
+ .tp_basicsize = NPY_SIZEOF_PYARRAYOBJECT,
/* methods */
- (destructor)array_dealloc, /* tp_dealloc */
- (printfunc)NULL, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
-#if defined(NPY_PY3K)
- 0, /* tp_reserved */
-#else
- 0, /* tp_compare */
-#endif
- (reprfunc)array_repr, /* tp_repr */
- &array_as_number, /* tp_as_number */
- &array_as_sequence, /* tp_as_sequence */
- &array_as_mapping, /* tp_as_mapping */
+ .tp_dealloc = (destructor)array_dealloc,
+ .tp_repr = (reprfunc)array_repr,
+ .tp_as_number = &array_as_number,
+ .tp_as_sequence = &array_as_sequence,
+ .tp_as_mapping = &array_as_mapping,
/*
* The tp_hash slot will be set PyObject_HashNotImplemented when the
* module is loaded.
*/
- (hashfunc)0, /* tp_hash */
- (ternaryfunc)0, /* tp_call */
- (reprfunc)array_str, /* tp_str */
- (getattrofunc)0, /* tp_getattro */
- (setattrofunc)0, /* tp_setattro */
- &array_as_buffer, /* tp_as_buffer */
- (Py_TPFLAGS_DEFAULT
-#if !defined(NPY_PY3K)
- | Py_TPFLAGS_CHECKTYPES
- | Py_TPFLAGS_HAVE_NEWBUFFER
-#endif
- | Py_TPFLAGS_BASETYPE), /* tp_flags */
- 0, /* tp_doc */
-
- (traverseproc)0, /* tp_traverse */
- (inquiry)0, /* tp_clear */
- (richcmpfunc)array_richcompare, /* tp_richcompare */
- offsetof(PyArrayObject_fields, weakreflist), /* tp_weaklistoffset */
- (getiterfunc)array_iter, /* tp_iter */
- (iternextfunc)0, /* tp_iternext */
- array_methods, /* tp_methods */
- 0, /* tp_members */
- array_getsetlist, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- (initproc)0, /* tp_init */
- (allocfunc)array_alloc, /* tp_alloc */
- (newfunc)array_new, /* tp_new */
- (freefunc)array_free, /* tp_free */
- 0, /* tp_is_gc */
- 0, /* tp_bases */
- 0, /* tp_mro */
- 0, /* tp_cache */
- 0, /* tp_subclasses */
- 0, /* tp_weaklist */
- 0, /* tp_del */
- 0, /* tp_version_tag */
+ .tp_str = (reprfunc)array_str,
+ .tp_as_buffer = &array_as_buffer,
+ .tp_flags =(Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE),
+
+ .tp_richcompare = (richcmpfunc)array_richcompare,
+ .tp_weaklistoffset = offsetof(PyArrayObject_fields, weakreflist),
+ .tp_iter = (getiterfunc)array_iter,
+ .tp_methods = array_methods,
+ .tp_getset = array_getsetlist,
+ .tp_alloc = (allocfunc)array_alloc,
+ .tp_new = (newfunc)array_new,
+ .tp_free = (freefunc)array_free,
};
diff --git a/numpy/core/src/multiarray/arraytypes.c.src b/numpy/core/src/multiarray/arraytypes.c.src
index 5d9e990e8..5e07f0df4 100644
--- a/numpy/core/src/multiarray/arraytypes.c.src
+++ b/numpy/core/src/multiarray/arraytypes.c.src
@@ -36,7 +36,7 @@
#include "cblasfuncs.h"
#include "npy_cblas.h"
-#include "buffer.h"
+#include "npy_buffer.h"
/* check for sequences, but ignore the types numpy considers scalars */
static NPY_INLINE npy_bool
@@ -210,7 +210,7 @@ static int
@type@ temp; /* ensures alignment */
if (PyArray_IsScalar(op, @kind@)) {
- temp = ((Py@kind@ScalarObject *)op)->obval;
+ temp = PyArrayScalar_VAL(op, @kind@);
}
else {
temp = (@type@)@func2@(op);
@@ -291,7 +291,7 @@ static int
}
if (PyArray_IsScalar(op, @kind@)){
- temp = ((Py@kind@ScalarObject *)op)->obval;
+ temp = PyArrayScalar_VAL(op, @kind@);
}
else {
if (op == Py_None) {
@@ -330,11 +330,7 @@ string_to_long_double(PyObject*op)
/* Convert python long objects to a longdouble, without precision or range
* loss via a double.
*/
- if ((PyLong_Check(op) && !PyBool_Check(op))
-#if !defined(NPY_PY3K)
- || (PyInt_Check(op) && !PyBool_Check(op))
-#endif
- ) {
+ if ((PyLong_Check(op) && !PyBool_Check(op))) {
return npy_longdouble_from_PyLong(op);
}
@@ -410,7 +406,7 @@ LONGDOUBLE_setitem(PyObject *op, void *ov, void *vap)
}
if (PyArray_IsScalar(op, LongDouble)) {
- temp = ((PyLongDoubleScalarObject *)op)->obval;
+ temp = PyArrayScalar_VAL(op, LongDouble);
}
else {
/* In case something funny happened in PyArray_IsScalar */
@@ -454,12 +450,6 @@ static int
UNICODE_setitem(PyObject *op, void *ov, void *vap)
{
PyArrayObject *ap = vap;
- PyObject *temp;
- Py_UNICODE *ptr;
- int datalen;
-#ifndef Py_UNICODE_WIDE
- char *buffer;
-#endif
if (PyArray_IsZeroDim(op)) {
return convert_to_scalar_and_retry(op, ov, vap, UNICODE_setitem);
@@ -470,7 +460,8 @@ UNICODE_setitem(PyObject *op, void *ov, void *vap)
"setting an array element with a sequence");
return -1;
}
-#if defined(NPY_PY3K)
+
+ PyObject *temp;
if (PyBytes_Check(op)) {
/* Try to decode from ASCII */
temp = PyUnicode_FromEncodedObject(op, "ASCII", "strict");
@@ -479,23 +470,29 @@ UNICODE_setitem(PyObject *op, void *ov, void *vap)
}
}
else if ((temp=PyObject_Str(op)) == NULL) {
-#else
- if ((temp=PyObject_Unicode(op)) == NULL) {
-#endif
return -1;
}
- ptr = PyUnicode_AS_UNICODE(temp);
- if ((ptr == NULL) || (PyErr_Occurred())) {
+
+ /* truncate if needed */
+ Py_ssize_t max_len = PyArray_DESCR(ap)->elsize >> 2;
+ Py_ssize_t actual_len = PyUnicode_GetLength(temp);
+ if (actual_len < 0) {
Py_DECREF(temp);
return -1;
}
- datalen = PyUnicode_GET_DATA_SIZE(temp);
+ if (actual_len > max_len) {
+ Py_SETREF(temp, PyUnicode_Substring(temp, 0, max_len));
+ if (temp == NULL) {
+ return -1;
+ }
+ actual_len = max_len;
+ }
-#ifdef Py_UNICODE_WIDE
- memcpy(ov, ptr, PyArray_MIN(PyArray_DESCR(ap)->elsize, datalen));
-#else
+ Py_ssize_t num_bytes = actual_len * 4;
+
+ char *buffer;
if (!PyArray_ISALIGNED(ap)) {
- buffer = PyArray_malloc(PyArray_DESCR(ap)->elsize);
+ buffer = PyArray_malloc(num_bytes);
if (buffer == NULL) {
Py_DECREF(temp);
PyErr_NoMemory();
@@ -505,20 +502,23 @@ UNICODE_setitem(PyObject *op, void *ov, void *vap)
else {
buffer = ov;
}
- datalen = PyUCS2Buffer_AsUCS4(ptr, (npy_ucs4 *)buffer,
- datalen >> 1, PyArray_DESCR(ap)->elsize >> 2);
- datalen <<= 2;
+ if (PyUnicode_AsUCS4(temp, (Py_UCS4 *)buffer, actual_len, 0) == NULL) {
+        if (!PyArray_ISALIGNED(ap)) PyArray_free(buffer); /* aligned case: buffer aliases ov, not owned */
+ Py_DECREF(temp);
+ return -1;
+ }
+
if (!PyArray_ISALIGNED(ap)) {
- memcpy(ov, buffer, datalen);
+ memcpy(ov, buffer, num_bytes);
PyArray_free(buffer);
}
-#endif
+
/* Fill in the rest of the space with 0 */
- if (PyArray_DESCR(ap)->elsize > datalen) {
- memset((char*)ov + datalen, 0, (PyArray_DESCR(ap)->elsize - datalen));
+ if (PyArray_DESCR(ap)->elsize > num_bytes) {
+ memset((char*)ov + num_bytes, 0, (PyArray_DESCR(ap)->elsize - num_bytes));
}
if (PyArray_ISBYTESWAPPED(ap)) {
- byte_swap_vector(ov, PyArray_DESCR(ap)->elsize >> 2, 4);
+ byte_swap_vector(ov, actual_len, 4);
}
Py_DECREF(temp);
return 0;
@@ -561,7 +561,6 @@ STRING_setitem(PyObject *op, void *ov, void *vap)
"setting an array element with a sequence");
return -1;
}
-#if defined(NPY_PY3K)
if (PyUnicode_Check(op)) {
/* Assume ASCII codec -- function similarly as Python 2 */
temp = PyUnicode_AsASCIIString(op);
@@ -588,11 +587,6 @@ STRING_setitem(PyObject *op, void *ov, void *vap)
return -1;
}
}
-#else
- if ((temp = PyObject_Str(op)) == NULL) {
- return -1;
- }
-#endif
if (PyBytes_AsStringAndSize(temp, &ptr, &len) < 0) {
Py_DECREF(temp);
return -1;
@@ -748,7 +742,7 @@ _setup_field(int i, PyArray_Descr *descr, PyArrayObject *arr,
}
((PyArrayObject_fields *)(arr))->descr = new;
- if ((new->alignment > 1) &&
+ if ((new->alignment > 1) &&
((((uintptr_t)dstdata + offset) % new->alignment) != 0)) {
PyArray_CLEARFLAGS(arr, NPY_ARRAY_ALIGNED);
}
@@ -836,7 +830,7 @@ VOID_setitem(PyObject *op, void *input, void *vap)
if (names_size != PyTuple_Size(op)) {
errmsg = PyUString_FromFormat(
"could not assign tuple of length %zd to structure "
- "with %" NPY_INTP_FMT " fields.",
+ "with %" NPY_INTP_FMT " fields.",
PyTuple_Size(op), names_size);
PyErr_SetObject(PyExc_ValueError, errmsg);
Py_DECREF(errmsg);
@@ -919,7 +913,6 @@ VOID_setitem(PyObject *op, void *input, void *vap)
* undiscerning case: It interprets any object as a buffer
* and reads as many bytes as possible, padding with 0.
*/
-#if defined(NPY_PY3K)
{
Py_buffer view;
@@ -933,20 +926,6 @@ VOID_setitem(PyObject *op, void *input, void *vap)
PyBuffer_Release(&view);
_dealloc_cached_buffer_info(op);
}
-#else
- {
- const void *buffer;
- Py_ssize_t buflen;
-
- if (PyObject_AsReadBuffer(op, &buffer, &buflen) < 0) {
- return -1;
- }
- memcpy(ip, buffer, PyArray_MIN(buflen, itemsize));
- if (itemsize > buflen) {
- memset(ip + buflen, 0, itemsize - buflen);
- }
- }
-#endif
return 0;
}
@@ -1081,6 +1060,7 @@ TIMEDELTA_setitem(PyObject *op, void *ov, void *vap)
* npy_long, npy_ulong, npy_longlong, npy_ulonglong,
* npy_float, npy_double, npy_longdouble,
* npy_datetime, npy_timedelta#
+ * #supports_nat = 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1#
*/
/**begin repeat1
@@ -1092,6 +1072,7 @@ TIMEDELTA_setitem(PyObject *op, void *ov, void *vap)
* npy_long, npy_ulong, npy_longlong, npy_ulonglong,
* npy_float, npy_double, npy_longdouble,
* npy_datetime, npy_timedelta#
+ * #floatingpoint = 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0#
*/
static void
@FROMTYPE@_to_@TOTYPE@(void *input, void *output, npy_intp n,
@@ -1101,7 +1082,15 @@ static void
@totype@ *op = output;
while (n--) {
- *op++ = (@totype@)*ip++;
+ @fromtype@ f = *ip++;
+ @totype@ t = (@totype@)f;
+#if @supports_nat@ && @floatingpoint@
+ /* Avoid undefined behaviour for NaN -> NaT */
+ if (npy_isnan(f)) {
+ t = (@totype@)NPY_DATETIME_NAT;
+ }
+#endif
+ *op++ = t;
}
}
/**end repeat1**/
@@ -1119,7 +1108,15 @@ static void
@totype@ *op = output;
while (n--) {
- *op++ = (@totype@)*ip;
+ @fromtype@ f = *ip;
+ @totype@ t = (@totype@)f;
+#if @supports_nat@
+ /* Avoid undefined behaviour for NaN -> NaT */
+ if (npy_isnan(f)) {
+ t = (@totype@)NPY_DATETIME_NAT;
+ }
+#endif
+ *op++ = t;
ip += 2;
}
}
@@ -1509,16 +1506,8 @@ OBJECT_to_@TOTYPE@(void *input, void *output, npy_intp n,
* #oskip = 1*18,(PyArray_DESCR(aop)->elsize)*3,1*2,
* 1*18,(PyArray_DESCR(aop)->elsize)*3,1*2,
* 1*18,(PyArray_DESCR(aop)->elsize)*3,1*2#
- * #convert = 1*18, 0*3, 1*2,
- * 1*18, 0*3, 1*2,
- * 0*23#
- * #convstr = (Int*9, Long*2, Float*4, Complex*3, Tuple*3, Long*2)*3#
*/
-#if @convert@
-
-#define IS_@from@
-
static void
@from@_to_@to@(void *input, void *output, npy_intp n,
void *vaip, void *aop)
@@ -1532,41 +1521,10 @@ static void
int oskip = @oskip@;
for (i = 0; i < n; i++, ip+=skip, op+=oskip) {
- PyObject *new;
PyObject *temp = PyArray_Scalar(ip, PyArray_DESCR(aip), (PyObject *)aip);
if (temp == NULL) {
return;
}
-
-#if defined(NPY_PY3K) && defined(IS_STRING)
- /* Work around some Python 3K */
- new = PyUnicode_FromEncodedObject(temp, "ascii", "strict");
- Py_DECREF(temp);
- temp = new;
- if (temp == NULL) {
- return;
- }
-#endif
- /* convert from Python object to needed one */
- {
- PyObject *args;
-
- /* call out to the Python builtin given by convstr */
- args = Py_BuildValue("(N)", temp);
-#if defined(NPY_PY3K)
-#define PyInt_Type PyLong_Type
-#endif
- new = Py@convstr@_Type.tp_new(&Py@convstr@_Type, args, NULL);
-#if defined(NPY_PY3K)
-#undef PyInt_Type
-#endif
- Py_DECREF(args);
- temp = new;
- if (temp == NULL) {
- return;
- }
- }
-
if (@to@_setitem(temp, op, aop)) {
Py_DECREF(temp);
return;
@@ -1575,36 +1533,6 @@ static void
}
}
-#undef IS_@from@
-
-#else
-
-static void
-@from@_to_@to@(void *input, void *output, npy_intp n,
- void *vaip, void *aop)
-{
- @fromtyp@ *ip = input;
- @totyp@ *op = output;
- PyArrayObject *aip = vaip;
-
- npy_intp i;
- int skip = PyArray_DESCR(aip)->elsize;
- int oskip = @oskip@;
-
- for (i = 0; i < n; i++, ip+=skip, op+=oskip) {
- PyObject *temp = PyArray_Scalar(ip, PyArray_DESCR(aip), (PyObject *)aip);
- if (temp == NULL) {
- return;
- }
- if (@to@_setitem(temp, op, aop)) {
- Py_DECREF(temp);
- return;
- }
- Py_DECREF(temp);
- }
-}
-
-#endif
/**end repeat**/
@@ -1757,7 +1685,58 @@ BOOL_scan(FILE *fp, npy_bool *ip, void *NPY_UNUSED(ignore),
}
/**begin repeat
- * #fname = CFLOAT, CDOUBLE, CLONGDOUBLE,
+ * #fname = CFLOAT, CDOUBLE#
+ * #type = npy_cfloat, npy_cdouble#
+ */
+static int
+@fname@_scan(FILE *fp, @type@ *ip, void *NPY_UNUSED(ignore),
+ PyArray_Descr *NPY_UNUSED(ignored))
+{
+ double result;
+ int ret_real, ret_imag;
+
+ ret_real = NumPyOS_ascii_ftolf(fp, &result);
+ @type@ output;
+ // Peek next character
+ char next = getc(fp);
+ if ((next == '+') || (next == '-')) {
+ // Imaginary component specified
+ output.real = result;
+ // Revert peek and read imaginary component
+ ungetc(next, fp);
+ ret_imag = NumPyOS_ascii_ftolf(fp, &result);
+ // Peak next character
+ next = getc(fp);
+ if ((ret_imag == 1) && (next == 'j')) {
+ // If read is successful and the immediate following char is j
+ output.imag = result;
+ }
+ else {
+ output.imag = 0;
+ // Push an invalid char to trigger the not everything is read error
+ ungetc('a', fp);
+ }
+ }
+ else if (next == 'j') {
+ // Real component not specified
+ output.real = 0;
+ output.imag = result;
+ }
+ else {
+ // Imaginary component not specified
+ output.real = result;
+ output.imag = 0.;
+ // Next character is not + / - / j. Revert peek.
+ ungetc(next, fp);
+ }
+ *(@type@ *)ip = output;
+ return ret_real;
+}
+/**end repeat**/
+
+
+/**begin repeat
+ * #fname = CLONGDOUBLE,
* OBJECT, STRING, UNICODE, VOID,
* DATETIME, TIMEDELTA#
*/
@@ -1849,7 +1828,60 @@ BOOL_fromstr(char *str, void *ip, char **endptr,
}
/**begin repeat
- * #fname = CFLOAT, CDOUBLE, CLONGDOUBLE,
+ * #fname = CFLOAT, CDOUBLE#
+ * #type = npy_cfloat, npy_cdouble#
+ */
+static int
+@fname@_fromstr(char *str, void *ip, char **endptr,
+ PyArray_Descr *NPY_UNUSED(ignore))
+{
+ double result;
+
+ result = NumPyOS_ascii_strtod(str, endptr);
+ @type@ output;
+
+ if (endptr && ((*endptr[0] == '+') || (*endptr[0] == '-'))) {
+ // Imaginary component specified
+ output.real = result;
+ // Reading imaginary component
+ char **prev = endptr;
+ str = *endptr;
+ result = NumPyOS_ascii_strtod(str, endptr);
+ if (endptr && *endptr[0] == 'j') {
+ // Read is successful if the immediate following char is j
+ output.imag = result;
+ // Skip j
+ ++*endptr;
+ }
+ else {
+ /*
+ * Set endptr to previous char to trigger the not everything is
+ * read error
+ */
+ endptr = prev;
+ output.imag = 0;
+ }
+ }
+ else if (endptr && *endptr[0] == 'j') {
+ // Real component not specified
+ output.real = 0;
+ output.imag = result;
+ // Skip j
+ ++*endptr;
+ }
+ else {
+ // Imaginary component not specified
+ output.real = result;
+ output.imag = 0.;
+ }
+ *(@type@ *)ip = output;
+ return 0;
+}
+/**end repeat**/
+
+
+/**begin repeat
+ * #fname = CLONGDOUBLE,
* OBJECT, STRING, UNICODE, VOID#
*/
@@ -2203,6 +2235,7 @@ static void
STRING_copyswapn (char *dst, npy_intp dstride, char *src, npy_intp sstride,
npy_intp n, int NPY_UNUSED(swap), PyArrayObject *arr)
{
+ assert(arr != NULL);
if (arr == NULL) {
return;
}
@@ -2217,6 +2250,7 @@ VOID_copyswapn (char *dst, npy_intp dstride, char *src, npy_intp sstride,
{
PyArray_Descr *descr;
+ assert(arr != NULL);
if (arr == NULL) {
return;
}
@@ -2307,6 +2341,7 @@ VOID_copyswap (char *dst, char *src, int swap, PyArrayObject *arr)
{
PyArray_Descr *descr;
+ assert(arr != NULL);
if (arr == NULL) {
return;
}
@@ -2388,6 +2423,7 @@ UNICODE_copyswapn (char *dst, npy_intp dstride, char *src, npy_intp sstride,
{
int itemsize;
+ assert(arr != NULL);
if (arr == NULL) {
return;
}
@@ -2415,6 +2451,7 @@ UNICODE_copyswapn (char *dst, npy_intp dstride, char *src, npy_intp sstride,
static void
STRING_copyswap(char *dst, char *src, int NPY_UNUSED(swap), PyArrayObject *arr)
{
+ assert(arr != NULL);
if (arr == NULL) {
return;
}
@@ -2427,6 +2464,7 @@ UNICODE_copyswap (char *dst, char *src, int swap, PyArrayObject *arr)
{
int itemsize;
+ assert(arr != NULL);
if (arr == NULL) {
return;
}
@@ -2557,12 +2595,6 @@ STRING_nonzero (char *ip, PyArrayObject *ap)
return nonz;
}
-#ifdef Py_UNICODE_WIDE
-#define PyArray_UCS4_ISSPACE Py_UNICODE_ISSPACE
-#else
-#define PyArray_UCS4_ISSPACE(ch) Py_STRING_ISSPACE((char)ch)
-#endif
-
static npy_bool
UNICODE_nonzero (npy_ucs4 *ip, PyArrayObject *ap)
{
@@ -2588,7 +2620,7 @@ UNICODE_nonzero (npy_ucs4 *ip, PyArrayObject *ap)
if (*ip == '\0') {
seen_null = NPY_TRUE;
}
- else if (seen_null || !PyArray_UCS4_ISSPACE(*ip)) {
+ else if (seen_null || !Py_UNICODE_ISSPACE(*ip)) {
nonz = NPY_TRUE;
break;
}
@@ -2878,7 +2910,7 @@ OBJECT_compare(PyObject **ip1, PyObject **ip2, PyArrayObject *NPY_UNUSED(ap))
ret = PyObject_RichCompareBool(*ip1, *ip2, Py_LT);
if (ret < 0) {
- /* error occurred, avoid the next call to PyObject_RichCompareBool */
+ /* error occurred, avoid the next call to PyObject_RichCompareBool */
return 0;
}
if (ret == 1) {
@@ -3078,6 +3110,7 @@ BOOL_argmax(npy_bool *ip, npy_intp n, npy_intp *max_ind,
* #le = _LESS_THAN_OR_EQUAL*10, npy_half_le, _LESS_THAN_OR_EQUAL*8#
* #iscomplex = 0*14, 1*3, 0*2#
* #incr = ip++*14, ip+=2*3, ip++*2#
+ * #isdatetime = 0*17, 1*2#
*/
static int
@fname@_argmax(@type@ *ip, npy_intp n, npy_intp *max_ind,
@@ -3103,6 +3136,12 @@ static int
return 0;
}
#endif
+#if @isdatetime@
+ if (mp == NPY_DATETIME_NAT) {
+ /* NaT encountered, it's maximal */
+ return 0;
+ }
+#endif
for (i = 1; i < n; i++) {
@incr@;
@@ -3122,6 +3161,13 @@ static int
}
}
#else
+#if @isdatetime@
+ if (*ip == NPY_DATETIME_NAT) {
+ /* NaT encountered, it's maximal */
+ *max_ind = i;
+ break;
+ }
+#endif
if (!@le@(*ip, mp)) { /* negated, for correct nan handling */
mp = *ip;
*max_ind = i;
@@ -3158,16 +3204,19 @@ BOOL_argmin(npy_bool *ip, npy_intp n, npy_intp *min_ind,
* #fname = BYTE, UBYTE, SHORT, USHORT, INT, UINT,
* LONG, ULONG, LONGLONG, ULONGLONG,
* HALF, FLOAT, DOUBLE, LONGDOUBLE,
- * CFLOAT, CDOUBLE, CLONGDOUBLE#
+ * CFLOAT, CDOUBLE, CLONGDOUBLE,
+ * DATETIME, TIMEDELTA#
* #type = npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint,
* npy_long, npy_ulong, npy_longlong, npy_ulonglong,
* npy_half, npy_float, npy_double, npy_longdouble,
- * npy_float, npy_double, npy_longdouble#
- * #isfloat = 0*10, 1*7#
- * #isnan = nop*10, npy_half_isnan, npy_isnan*6#
- * #le = _LESS_THAN_OR_EQUAL*10, npy_half_le, _LESS_THAN_OR_EQUAL*6#
- * #iscomplex = 0*14, 1*3#
- * #incr = ip++*14, ip+=2*3#
+ * npy_float, npy_double, npy_longdouble,
+ * npy_datetime, npy_timedelta#
+ * #isfloat = 0*10, 1*7, 0*2#
+ * #isnan = nop*10, npy_half_isnan, npy_isnan*6, nop*2#
+ * #le = _LESS_THAN_OR_EQUAL*10, npy_half_le, _LESS_THAN_OR_EQUAL*8#
+ * #iscomplex = 0*14, 1*3, 0*2#
+ * #incr = ip++*14, ip+=2*3, ip++*2#
+ * #isdatetime = 0*17, 1*2#
*/
static int
@fname@_argmin(@type@ *ip, npy_intp n, npy_intp *min_ind,
@@ -3193,6 +3242,12 @@ static int
return 0;
}
#endif
+#if @isdatetime@
+ if (mp == NPY_DATETIME_NAT) {
+ /* NaT encountered, it's minimal */
+ return 0;
+ }
+#endif
for (i = 1; i < n; i++) {
@incr@;
@@ -3212,6 +3267,13 @@ static int
}
}
#else
+#if @isdatetime@
+ if (*ip == NPY_DATETIME_NAT) {
+ /* NaT encountered, it's minimal */
+ *min_ind = i;
+ break;
+ }
+#endif
if (!@le@(mp, *ip)) { /* negated, for correct nan handling */
mp = *ip;
*min_ind = i;
@@ -3231,43 +3293,6 @@ static int
#undef _LESS_THAN_OR_EQUAL
-/**begin repeat
- *
- * #fname = DATETIME, TIMEDELTA#
- * #type = npy_datetime, npy_timedelta#
- */
-static int
-@fname@_argmin(@type@ *ip, npy_intp n, npy_intp *min_ind,
- PyArrayObject *NPY_UNUSED(aip))
-{
- /* NPY_DATETIME_NAT is smaller than every other value, we skip
- * it for consistency with min().
- */
- npy_intp i;
- @type@ mp = NPY_DATETIME_NAT;
-
- i = 0;
- while (i < n && mp == NPY_DATETIME_NAT) {
- mp = ip[i];
- i++;
- }
- if (i == n) {
- /* All NaTs: return 0 */
- *min_ind = 0;
- return 0;
- }
- *min_ind = i - 1;
- for (; i < n; i++) {
- if (mp > ip[i] && ip[i] != NPY_DATETIME_NAT) {
- mp = ip[i];
- *min_ind = i;
- }
- }
- return 0;
-}
-
-/**end repeat**/
-
static int
OBJECT_argmax(PyObject **ip, npy_intp n, npy_intp *max_ind,
PyArrayObject *NPY_UNUSED(aip))
@@ -3420,17 +3445,17 @@ NPY_NO_EXPORT void
npy_intp n, void *NPY_UNUSED(ignore))
{
#if defined(HAVE_CBLAS)
- int is1b = blas_stride(is1, sizeof(@type@));
- int is2b = blas_stride(is2, sizeof(@type@));
+ CBLAS_INT is1b = blas_stride(is1, sizeof(@type@));
+ CBLAS_INT is2b = blas_stride(is2, sizeof(@type@));
if (is1b && is2b)
{
double sum = 0.; /* double for stability */
while (n > 0) {
- int chunk = n < NPY_CBLAS_CHUNK ? n : NPY_CBLAS_CHUNK;
+ CBLAS_INT chunk = n < NPY_CBLAS_CHUNK ? n : NPY_CBLAS_CHUNK;
- sum += cblas_@prefix@dot(chunk,
+ sum += CBLAS_FUNC(cblas_@prefix@dot)(chunk,
(@type@ *) ip1, is1b,
(@type@ *) ip2, is2b);
/* use char strides here */
@@ -3469,17 +3494,17 @@ NPY_NO_EXPORT void
char *op, npy_intp n, void *NPY_UNUSED(ignore))
{
#if defined(HAVE_CBLAS)
- int is1b = blas_stride(is1, sizeof(@ctype@));
- int is2b = blas_stride(is2, sizeof(@ctype@));
+ CBLAS_INT is1b = blas_stride(is1, sizeof(@ctype@));
+ CBLAS_INT is2b = blas_stride(is2, sizeof(@ctype@));
if (is1b && is2b) {
double sum[2] = {0., 0.}; /* double for stability */
while (n > 0) {
- int chunk = n < NPY_CBLAS_CHUNK ? n : NPY_CBLAS_CHUNK;
+ CBLAS_INT chunk = n < NPY_CBLAS_CHUNK ? n : NPY_CBLAS_CHUNK;
@type@ tmp[2];
- cblas_@prefix@dotu_sub((int)n, ip1, is1b, ip2, is2b, tmp);
+ CBLAS_FUNC(cblas_@prefix@dotu_sub)((CBLAS_INT)n, ip1, is1b, ip2, is2b, tmp);
sum[0] += (double)tmp[0];
sum[1] += (double)tmp[1];
/* use char strides here */
@@ -3808,172 +3833,6 @@ static void
/*
*****************************************************************************
- ** FASTPUTMASK **
- *****************************************************************************
- */
-
-
-/**begin repeat
- *
- * #name = BOOL,
- * BYTE, UBYTE, SHORT, USHORT, INT, UINT,
- * LONG, ULONG, LONGLONG, ULONGLONG,
- * HALF, FLOAT, DOUBLE, LONGDOUBLE,
- * CFLOAT, CDOUBLE, CLONGDOUBLE,
- * DATETIME, TIMEDELTA#
- * #type = npy_bool, npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint,
- * npy_long, npy_ulong, npy_longlong, npy_ulonglong,
- * npy_half, npy_float, npy_double, npy_longdouble,
- * npy_cfloat, npy_cdouble, npy_clongdouble,
- * npy_datetime, npy_timedelta#
-*/
-static void
-@name@_fastputmask(@type@ *in, npy_bool *mask, npy_intp ni, @type@ *vals,
- npy_intp nv)
-{
- npy_intp i, j;
-
- if (nv == 1) {
- @type@ s_val = *vals;
- for (i = 0; i < ni; i++) {
- if (mask[i]) {
- in[i] = s_val;
- }
- }
- }
- else {
- for (i = 0, j = 0; i < ni; i++, j++) {
- if (j >= nv) {
- j = 0;
- }
- if (mask[i]) {
- in[i] = vals[j];
- }
- }
- }
- return;
-}
-/**end repeat**/
-
-#define OBJECT_fastputmask NULL
-
-
-/*
- *****************************************************************************
- ** FASTTAKE **
- *****************************************************************************
- */
-
-
-/**begin repeat
- *
- * #name = BOOL,
- * BYTE, UBYTE, SHORT, USHORT, INT, UINT,
- * LONG, ULONG, LONGLONG, ULONGLONG,
- * HALF, FLOAT, DOUBLE, LONGDOUBLE,
- * CFLOAT, CDOUBLE, CLONGDOUBLE,
- * DATETIME, TIMEDELTA#
- * #type = npy_bool,
- * npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint,
- * npy_long, npy_ulong, npy_longlong, npy_ulonglong,
- * npy_half, npy_float, npy_double, npy_longdouble,
- * npy_cfloat, npy_cdouble, npy_clongdouble,
- * npy_datetime, npy_timedelta#
-*/
-static int
-@name@_fasttake(@type@ *dest, @type@ *src, npy_intp *indarray,
- npy_intp nindarray, npy_intp n_outer,
- npy_intp m_middle, npy_intp nelem,
- NPY_CLIPMODE clipmode)
-{
- npy_intp i, j, k, tmp;
- NPY_BEGIN_THREADS_DEF;
-
- NPY_BEGIN_THREADS;
-
- switch(clipmode) {
- case NPY_RAISE:
- for (i = 0; i < n_outer; i++) {
- for (j = 0; j < m_middle; j++) {
- tmp = indarray[j];
- /*
- * We don't know what axis we're operating on,
- * so don't report it in case of an error.
- */
- if (check_and_adjust_index(&tmp, nindarray, -1, _save) < 0) {
- return 1;
- }
- if (NPY_LIKELY(nelem == 1)) {
- *dest++ = *(src + tmp);
- }
- else {
- for (k = 0; k < nelem; k++) {
- *dest++ = *(src + tmp*nelem + k);
- }
- }
- }
- src += nelem*nindarray;
- }
- break;
- case NPY_WRAP:
- for (i = 0; i < n_outer; i++) {
- for (j = 0; j < m_middle; j++) {
- tmp = indarray[j];
- if (tmp < 0) {
- while (tmp < 0) {
- tmp += nindarray;
- }
- }
- else if (tmp >= nindarray) {
- while (tmp >= nindarray) {
- tmp -= nindarray;
- }
- }
- if (NPY_LIKELY(nelem == 1)) {
- *dest++ = *(src+tmp);
- }
- else {
- for (k = 0; k < nelem; k++) {
- *dest++ = *(src+tmp*nelem+k);
- }
- }
- }
- src += nelem*nindarray;
- }
- break;
- case NPY_CLIP:
- for (i = 0; i < n_outer; i++) {
- for (j = 0; j < m_middle; j++) {
- tmp = indarray[j];
- if (tmp < 0) {
- tmp = 0;
- }
- else if (tmp >= nindarray) {
- tmp = nindarray - 1;
- }
- if (NPY_LIKELY(nelem == 1)) {
- *dest++ = *(src + tmp);
- }
- else {
- for (k = 0; k < nelem; k++) {
- *dest++ = *(src + tmp*nelem + k);
- }
- }
- }
- src += nelem*nindarray;
- }
- break;
- }
-
- NPY_END_THREADS;
- return 0;
-}
-/**end repeat**/
-
-#define OBJECT_fasttake NULL
-
-/*
- *****************************************************************************
** small correlate **
*****************************************************************************
*/
@@ -4078,7 +3937,7 @@ _datetime_dtype_metadata_clone(NpyAuxData *data)
}
/*
- * Allcoate and initialize a PyArray_DatetimeDTypeMetaData object
+ * Allocate and initialize a PyArray_DatetimeDTypeMetaData object
*/
static NpyAuxData*
_create_datetime_metadata(NPY_DATETIMEUNIT base, int num)
@@ -4314,7 +4173,7 @@ static PyArray_ArrFuncs _Py@NAME@_ArrFuncs = {
#if @rsort@
aradixsort_@suff@
#else
- atimsort_@suff@
+ atimsort_@suff@
#endif
},
#else
@@ -4330,8 +4189,8 @@ static PyArray_ArrFuncs _Py@NAME@_ArrFuncs = {
NULL,
NULL,
(PyArray_FastClipFunc*)NULL,
- (PyArray_FastPutmaskFunc*)@from@_fastputmask,
- (PyArray_FastTakeFunc*)@from@_fasttake,
+ (PyArray_FastPutmaskFunc*)NULL,
+ (PyArray_FastTakeFunc*)NULL,
(PyArray_ArgFunc*)@from@_argmin
};
@@ -4607,7 +4466,7 @@ set_typeinfo(PyObject *dict)
infodict = PyDict_New();
if (infodict == NULL) return -1;
-
+ int ret;
/**begin repeat
*
* #name = BOOL,
@@ -4650,10 +4509,15 @@ set_typeinfo(PyObject *dict)
&Py@Name@ArrType_Type
);
if (s == NULL) {
+ Py_DECREF(infodict);
return -1;
}
- PyDict_SetItemString(infodict, "@name@", s);
+ ret = PyDict_SetItemString(infodict, "@name@", s);
Py_DECREF(s);
+ if (ret < 0) {
+ Py_DECREF(infodict);
+ return -1;
+ }
/**end repeat**/
@@ -4673,10 +4537,15 @@ set_typeinfo(PyObject *dict)
_ALIGN(@type@), &Py@Name@ArrType_Type
);
if (s == NULL) {
+ Py_DECREF(infodict);
return -1;
}
- PyDict_SetItemString(infodict, "@name@", s);
+ ret = PyDict_SetItemString(infodict, "@name@", s);
Py_DECREF(s);
+ if (ret < 0) {
+ Py_DECREF(infodict);
+ return -1;
+ }
/**end repeat**/
@@ -4686,37 +4555,57 @@ set_typeinfo(PyObject *dict)
&PyObjectArrType_Type
);
if (s == NULL) {
+ Py_DECREF(infodict);
return -1;
}
- PyDict_SetItemString(infodict, "OBJECT", s);
+ ret = PyDict_SetItemString(infodict, "OBJECT", s);
Py_DECREF(s);
+ if (ret < 0) {
+ Py_DECREF(infodict);
+ return -1;
+ }
s = PyArray_typeinfo(
NPY_STRINGLTR, NPY_STRING, 0, _ALIGN(char),
&PyStringArrType_Type
);
if (s == NULL) {
+ Py_DECREF(infodict);
return -1;
}
- PyDict_SetItemString(infodict, "STRING", s);
+ ret = PyDict_SetItemString(infodict, "STRING", s);
Py_DECREF(s);
+ if (ret < 0) {
+ Py_DECREF(infodict);
+ return -1;
+ }
s = PyArray_typeinfo(
NPY_UNICODELTR, NPY_UNICODE, 0, _ALIGN(npy_ucs4),
&PyUnicodeArrType_Type
);
if (s == NULL) {
+ Py_DECREF(infodict);
return -1;
}
- PyDict_SetItemString(infodict, "UNICODE", s);
+ ret = PyDict_SetItemString(infodict, "UNICODE", s);
Py_DECREF(s);
+ if (ret < 0) {
+ Py_DECREF(infodict);
+ return -1;
+ }
s = PyArray_typeinfo(
NPY_VOIDLTR, NPY_VOID, 0, _ALIGN(char),
&PyVoidArrType_Type
);
if (s == NULL) {
+ Py_DECREF(infodict);
return -1;
}
- PyDict_SetItemString(infodict, "VOID", s);
+ ret = PyDict_SetItemString(infodict, "VOID", s);
Py_DECREF(s);
+ if (ret < 0) {
+ Py_DECREF(infodict);
+ return -1;
+ }
s = PyArray_typeinforanged(
NPY_DATETIMELTR, NPY_DATETIME, NPY_BITSOF_DATETIME,
_ALIGN(npy_datetime),
@@ -4725,10 +4614,15 @@ set_typeinfo(PyObject *dict)
&PyDatetimeArrType_Type
);
if (s == NULL) {
+ Py_DECREF(infodict);
return -1;
}
- PyDict_SetItemString(infodict, "DATETIME", s);
+ ret = PyDict_SetItemString(infodict, "DATETIME", s);
Py_DECREF(s);
+ if (ret < 0) {
+ Py_DECREF(infodict);
+ return -1;
+ }
s = PyArray_typeinforanged(
NPY_TIMEDELTALTR, NPY_TIMEDELTA, NPY_BITSOF_TIMEDELTA,
_ALIGN(npy_timedelta),
@@ -4737,15 +4631,23 @@ set_typeinfo(PyObject *dict)
&PyTimedeltaArrType_Type
);
if (s == NULL) {
+ Py_DECREF(infodict);
return -1;
}
- PyDict_SetItemString(infodict, "TIMEDELTA", s);
+ ret = PyDict_SetItemString(infodict, "TIMEDELTA", s);
Py_DECREF(s);
+ if (ret < 0) {
+ Py_DECREF(infodict);
+ return -1;
+ }
-#define SETTYPE(name) \
- Py_INCREF(&Py##name##ArrType_Type); \
- PyDict_SetItemString(infodict, #name, \
- (PyObject *)&Py##name##ArrType_Type)
+#define SETTYPE(name) \
+ Py_INCREF(&Py##name##ArrType_Type); \
+ if (PyDict_SetItemString(infodict, #name, \
+ (PyObject *)&Py##name##ArrType_Type) < 0) { \
+ Py_DECREF(infodict); \
+ return -1; \
+ }
SETTYPE(Generic);
SETTYPE(Number);
@@ -4760,8 +4662,11 @@ set_typeinfo(PyObject *dict)
#undef SETTYPE
- PyDict_SetItemString(dict, "typeinfo", infodict);
+ ret = PyDict_SetItemString(dict, "typeinfo", infodict);
Py_DECREF(infodict);
+ if (ret < 0) {
+ return -1;
+ }
return 0;
}
diff --git a/numpy/core/src/multiarray/buffer.c b/numpy/core/src/multiarray/buffer.c
index b729027ad..9a1f7b230 100644
--- a/numpy/core/src/multiarray/buffer.c
+++ b/numpy/core/src/multiarray/buffer.c
@@ -11,7 +11,7 @@
#include "npy_pycompat.h"
-#include "buffer.h"
+#include "npy_buffer.h"
#include "common.h"
#include "numpyos.h"
#include "arrayobject.h"
@@ -21,59 +21,6 @@
**************** Implement Buffer Protocol ****************************
*************************************************************************/
-/* removed multiple segment interface */
-
-#if !defined(NPY_PY3K)
-static Py_ssize_t
-array_getsegcount(PyArrayObject *self, Py_ssize_t *lenp)
-{
- if (lenp) {
- *lenp = PyArray_NBYTES(self);
- }
- if (PyArray_ISONESEGMENT(self)) {
- return 1;
- }
- if (lenp) {
- *lenp = 0;
- }
- return 0;
-}
-
-static Py_ssize_t
-array_getreadbuf(PyArrayObject *self, Py_ssize_t segment, void **ptrptr)
-{
- if (segment != 0) {
- PyErr_SetString(PyExc_ValueError,
- "accessing non-existing array segment");
- return -1;
- }
- if (PyArray_ISONESEGMENT(self)) {
- *ptrptr = PyArray_DATA(self);
- return PyArray_NBYTES(self);
- }
- PyErr_SetString(PyExc_ValueError, "array is not a single segment");
- *ptrptr = NULL;
- return -1;
-}
-
-
-static Py_ssize_t
-array_getwritebuf(PyArrayObject *self, Py_ssize_t segment, void **ptrptr)
-{
- if (PyArray_FailUnlessWriteable(self, "buffer source array") < 0) {
- return -1;
- }
- return array_getreadbuf(self, segment, (void **) ptrptr);
-}
-
-static Py_ssize_t
-array_getcharbuf(PyArrayObject *self, Py_ssize_t segment, constchar **ptrptr)
-{
- return array_getreadbuf(self, segment, (void **) ptrptr);
-}
-#endif /* !defined(NPY_PY3K) */
-
-
/*************************************************************************
* PEP 3118 buffer protocol
*
@@ -151,13 +98,8 @@ _append_field_name(_tmp_string_t *str, PyObject *name)
char *p;
Py_ssize_t len;
PyObject *tmp;
-#if defined(NPY_PY3K)
/* FIXME: XXX -- should it use UTF-8 here? */
tmp = PyUnicode_AsUTF8String(name);
-#else
- tmp = name;
- Py_INCREF(tmp);
-#endif
if (tmp == NULL || PyBytes_AsStringAndSize(tmp, &p, &len) < 0) {
PyErr_Clear();
PyErr_SetString(PyExc_ValueError, "invalid field name");
@@ -523,7 +465,7 @@ _buffer_info_new(PyObject *obj)
/*
* Special case datetime64 scalars to remain backward compatible.
* This will change in a future version.
- * Note arrays of datetime64 and strutured arrays with datetime64
+ * Note arrays of datetime64 and structured arrays with datetime64
* fields will not hit this code path and are currently unsupported
* in _buffer_format_string.
*/
@@ -890,11 +832,6 @@ gentype_getbuffer(PyObject *self, Py_buffer *view, int flags)
descr = PyArray_DescrFromScalar(self);
view->buf = (void *)scalar_value(self, descr);
elsize = descr->elsize;
-#ifndef Py_UNICODE_WIDE
- if (descr->type_num == NPY_UNICODE) {
- elsize >>= 1;
- }
-#endif
view->len = elsize;
if (PyArray_IsScalar(self, Datetime) || PyArray_IsScalar(self, Timedelta)) {
elsize = 1; /* descr->elsize,char is 8,'M', but we return 1,'B' */
@@ -952,12 +889,6 @@ _dealloc_cached_buffer_info(PyObject *self)
/*************************************************************************/
NPY_NO_EXPORT PyBufferProcs array_as_buffer = {
-#if !defined(NPY_PY3K)
- (readbufferproc)array_getreadbuf, /*bf_getreadbuffer*/
- (writebufferproc)array_getwritebuf, /*bf_getwritebuffer*/
- (segcountproc)array_getsegcount, /*bf_getsegcount*/
- (charbufferproc)array_getcharbuf, /*bf_getcharbuffer*/
-#endif
(getbufferproc)array_getbuffer,
(releasebufferproc)0,
};
@@ -968,13 +899,13 @@ NPY_NO_EXPORT PyBufferProcs array_as_buffer = {
*/
static int
-_descriptor_from_pep3118_format_fast(char *s, PyObject **result);
+_descriptor_from_pep3118_format_fast(char const *s, PyObject **result);
static int
_pep3118_letter_to_type(char letter, int native, int complex);
NPY_NO_EXPORT PyArray_Descr*
-_descriptor_from_pep3118_format(char *s)
+_descriptor_from_pep3118_format(char const *s)
{
char *buf, *p;
int in_name = 0;
@@ -1059,7 +990,7 @@ _descriptor_from_pep3118_format(char *s)
*/
static int
-_descriptor_from_pep3118_format_fast(char *s, PyObject **result)
+_descriptor_from_pep3118_format_fast(char const *s, PyObject **result)
{
PyArray_Descr *descr;
diff --git a/numpy/core/src/multiarray/calculation.c b/numpy/core/src/multiarray/calculation.c
index 1d72a5227..92ab75053 100644
--- a/numpy/core/src/multiarray/calculation.c
+++ b/numpy/core/src/multiarray/calculation.c
@@ -772,11 +772,7 @@ PyArray_Mean(PyArrayObject *self, int axis, int rtype, PyArrayObject *out)
return NULL;
}
if (!out) {
-#if defined(NPY_PY3K)
ret = PyNumber_TrueDivide(obj1, obj2);
-#else
- ret = PyNumber_Divide(obj1, obj2);
-#endif
}
else {
ret = PyObject_CallFunction(n_ops.divide, "OOO", out, obj2, out);
@@ -930,14 +926,15 @@ PyArray_Clip(PyArrayObject *self, PyObject *min, PyObject *max, PyArrayObject *o
}
}
- /* NumPy 1.17.0, 2019-02-24 */
- if (DEPRECATE(
- "->f->fastclip is deprecated. Use PyUFunc_RegisterLoopForDescr to "
- "attach a custom loop to np.core.umath.clip, np.minimum, and "
- "np.maximum") < 0) {
- return NULL;
- }
- /* everything below can be removed once this deprecation completes */
+ /*
+ * NumPy 1.17.0, 2019-02-24
+ * NumPy 1.19.0, 2020-01-15
+ *
+ * Setting `->f->fastclip to anything but NULL has been deprecated in 1.19
+ * the code path below was previously deprecated since 1.17.
+ * (the deprecation moved to registration time instead of execution time)
+ * everything below can be removed once this deprecation completes
+ */
if (func == NULL
|| (min != NULL && !PyArray_CheckAnyScalar(min))
diff --git a/numpy/core/src/multiarray/common.c b/numpy/core/src/multiarray/common.c
index 3270bc20d..0150ae10e 100644
--- a/numpy/core/src/multiarray/common.c
+++ b/numpy/core/src/multiarray/common.c
@@ -12,7 +12,7 @@
#include "usertypes.h"
#include "common.h"
-#include "buffer.h"
+#include "npy_buffer.h"
#include "get_attr_string.h"
#include "mem_overlap.h"
@@ -121,6 +121,57 @@ PyArray_DTypeFromObject(PyObject *obj, int maxdims, PyArray_Descr **out_dtype)
return res;
}
+/*
+ * Get a suitable string dtype by calling `__str__`.
+ * For `np.bytes_`, this assumes an ASCII encoding.
+ */
+static PyArray_Descr *
+PyArray_DTypeFromObjectStringDiscovery(
+ PyObject *obj, PyArray_Descr *last_dtype, int string_type)
+{
+ int itemsize;
+
+ if (string_type == NPY_STRING) {
+ PyObject *temp = PyObject_Str(obj);
+ if (temp == NULL) {
+ return NULL;
+ }
+ /* assume that when we do the encoding elsewhere we'll use ASCII */
+ itemsize = PyUnicode_GetLength(temp);
+ Py_DECREF(temp);
+ if (itemsize < 0) {
+ return NULL;
+ }
+ }
+ else if (string_type == NPY_UNICODE) {
+ PyObject *temp = PyObject_Str(obj);
+ if (temp == NULL) {
+ return NULL;
+ }
+ itemsize = PyUnicode_GetLength(temp);
+ Py_DECREF(temp);
+ if (itemsize < 0) {
+ return NULL;
+ }
+ itemsize *= 4; /* convert UCS4 codepoints to bytes */
+ }
+ else {
+ return NULL;
+ }
+ if (last_dtype != NULL &&
+ last_dtype->type_num == string_type &&
+ last_dtype->elsize >= itemsize) {
+ Py_INCREF(last_dtype);
+ return last_dtype;
+ }
+ PyArray_Descr *dtype = PyArray_DescrNewFromType(string_type);
+ if (dtype == NULL) {
+ return NULL;
+ }
+ dtype->elsize = itemsize;
+ return dtype;
+}
+
NPY_NO_EXPORT int
PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims,
PyArray_Descr **out_dtype, int string_type)
@@ -158,50 +209,17 @@ PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims,
}
}
else {
- int itemsize;
- PyObject *temp;
-
- if (string_type == NPY_STRING) {
- if ((temp = PyObject_Str(obj)) == NULL) {
- goto fail;
- }
-#if defined(NPY_PY3K)
- #if PY_VERSION_HEX >= 0x03030000
- itemsize = PyUnicode_GetLength(temp);
- #else
- itemsize = PyUnicode_GET_SIZE(temp);
- #endif
-#else
- itemsize = PyString_GET_SIZE(temp);
-#endif
- }
- else if (string_type == NPY_UNICODE) {
-#if defined(NPY_PY3K)
- if ((temp = PyObject_Str(obj)) == NULL) {
-#else
- if ((temp = PyObject_Unicode(obj)) == NULL) {
-#endif
- goto fail;
- }
- itemsize = PyUnicode_GET_DATA_SIZE(temp);
-#ifndef Py_UNICODE_WIDE
- itemsize <<= 1;
-#endif
- }
- else {
+ dtype = PyArray_DTypeFromObjectStringDiscovery(
+ obj, *out_dtype, string_type);
+ if (dtype == NULL) {
goto fail;
}
- Py_DECREF(temp);
- if (*out_dtype != NULL &&
- (*out_dtype)->type_num == string_type &&
- (*out_dtype)->elsize >= itemsize) {
+
+ /* nothing to do, dtype is already correct */
+ if (dtype == *out_dtype){
+ Py_DECREF(dtype);
return 0;
}
- dtype = PyArray_DescrNewFromType(string_type);
- if (dtype == NULL) {
- goto fail;
- }
- dtype->elsize = itemsize;
}
goto promote_types;
}
@@ -210,54 +228,19 @@ PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims,
dtype = _array_find_python_scalar_type(obj);
if (dtype != NULL) {
if (string_type) {
- int itemsize;
- PyObject *temp;
-
/* dtype is not used in this (string discovery) branch */
Py_DECREF(dtype);
- dtype = NULL;
-
- if (string_type == NPY_STRING) {
- if ((temp = PyObject_Str(obj)) == NULL) {
- goto fail;
- }
-#if defined(NPY_PY3K)
- #if PY_VERSION_HEX >= 0x03030000
- itemsize = PyUnicode_GetLength(temp);
- #else
- itemsize = PyUnicode_GET_SIZE(temp);
- #endif
-#else
- itemsize = PyString_GET_SIZE(temp);
-#endif
- }
- else if (string_type == NPY_UNICODE) {
-#if defined(NPY_PY3K)
- if ((temp = PyObject_Str(obj)) == NULL) {
-#else
- if ((temp = PyObject_Unicode(obj)) == NULL) {
-#endif
- goto fail;
- }
- itemsize = PyUnicode_GET_DATA_SIZE(temp);
-#ifndef Py_UNICODE_WIDE
- itemsize <<= 1;
-#endif
- }
- else {
+ dtype = PyArray_DTypeFromObjectStringDiscovery(
+ obj, *out_dtype, string_type);
+ if (dtype == NULL) {
goto fail;
}
- Py_DECREF(temp);
- if (*out_dtype != NULL &&
- (*out_dtype)->type_num == string_type &&
- (*out_dtype)->elsize >= itemsize) {
+
+ /* nothing to do, dtype is already correct */
+ if (dtype == *out_dtype){
+ Py_DECREF(dtype);
return 0;
}
- dtype = PyArray_DescrNewFromType(string_type);
- if (dtype == NULL) {
- goto fail;
- }
- dtype->elsize = itemsize;
}
goto promote_types;
}
@@ -282,10 +265,11 @@ PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims,
/* Check if it's a Unicode string */
if (PyUnicode_Check(obj)) {
- int itemsize = PyUnicode_GET_DATA_SIZE(obj);
-#ifndef Py_UNICODE_WIDE
- itemsize <<= 1;
-#endif
+ int itemsize = PyUnicode_GetLength(obj);
+ if (itemsize < 0) {
+ goto fail;
+ }
+ itemsize *= 4;
/*
* If it's already a big enough unicode object,
@@ -340,24 +324,21 @@ PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims,
if (ip != NULL) {
if (PyDict_Check(ip)) {
PyObject *typestr;
-#if defined(NPY_PY3K)
PyObject *tmp = NULL;
-#endif
- typestr = PyDict_GetItemString(ip, "typestr");
-#if defined(NPY_PY3K)
+ typestr = _PyDict_GetItemStringWithError(ip, "typestr");
+ if (typestr == NULL && PyErr_Occurred()) {
+ goto fail;
+ }
/* Allow unicode type strings */
if (typestr && PyUnicode_Check(typestr)) {
tmp = PyUnicode_AsASCIIString(typestr);
typestr = tmp;
}
-#endif
if (typestr && PyBytes_Check(typestr)) {
dtype =_array_typedescr_fromstr(PyBytes_AS_STRING(typestr));
-#if defined(NPY_PY3K)
if (tmp == typestr) {
Py_DECREF(tmp);
}
-#endif
Py_DECREF(ip);
if (dtype == NULL) {
goto fail;
@@ -367,6 +348,10 @@ PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims,
}
Py_DECREF(ip);
}
+ else if (PyErr_Occurred()) {
+ PyErr_Clear(); /* TODO[gh-14801]: propagate crashes during attribute access? */
+ }
+
/* The array struct interface */
ip = PyArray_LookupSpecial_OnInstance(obj, "__array_struct__");
@@ -389,19 +374,9 @@ PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims,
}
Py_DECREF(ip);
}
-
- /* The old buffer interface */
-#if !defined(NPY_PY3K)
- if (PyBuffer_Check(obj)) {
- dtype = PyArray_DescrNewFromType(NPY_VOID);
- if (dtype == NULL) {
- goto fail;
- }
- dtype->elsize = Py_TYPE(obj)->tp_as_sequence->sq_length(obj);
- PyErr_Clear();
- goto promote_types;
+ else if (PyErr_Occurred()) {
+ PyErr_Clear(); /* TODO[gh-14801]: propagate crashes during attribute access? */
}
-#endif
/* The __array__ attribute */
ip = PyArray_LookupSpecial_OnInstance(obj, "__array__");
@@ -419,6 +394,9 @@ PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims,
goto fail;
}
}
+ else if (PyErr_Occurred()) {
+ PyErr_Clear(); /* TODO[gh-14801]: propagate crashes during attribute access? */
+ }
/*
* If we reached the maximum recursion depth without hitting one
@@ -468,9 +446,6 @@ PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims,
if (common_type != NULL && !string_type &&
(common_type == &PyFloat_Type ||
/* TODO: we could add longs if we add a range check */
-#if !defined(NPY_PY3K)
- common_type == &PyInt_Type ||
-#endif
common_type == &PyBool_Type ||
common_type == &PyComplex_Type)) {
size = 1;
@@ -544,7 +519,7 @@ fail:
/* new reference */
NPY_NO_EXPORT PyArray_Descr *
-_array_typedescr_fromstr(char *c_str)
+_array_typedescr_fromstr(char const *c_str)
{
PyArray_Descr *descr = NULL;
PyObject *stringobj = PyString_FromString(c_str);
@@ -602,12 +577,7 @@ NPY_NO_EXPORT npy_bool
_IsWriteable(PyArrayObject *ap)
{
PyObject *base = PyArray_BASE(ap);
-#if defined(NPY_PY3K)
Py_buffer view;
-#else
- void *dummy;
- Py_ssize_t n;
-#endif
/*
* C-data wrapping arrays may not own their data while not having a base;
@@ -651,7 +621,6 @@ _IsWriteable(PyArrayObject *ap)
assert(!PyArray_CHKFLAGS(ap, NPY_ARRAY_OWNDATA));
}
-#if defined(NPY_PY3K)
if (PyObject_GetBuffer(base, &view, PyBUF_WRITABLE|PyBUF_SIMPLE) < 0) {
PyErr_Clear();
return NPY_FALSE;
@@ -665,12 +634,6 @@ _IsWriteable(PyArrayObject *ap)
* _dealloc_cached_buffer_info, but in this case leave it in the cache to
* speed up future calls to _IsWriteable.
*/
-#else
- if (PyObject_AsWriteBuffer(base, &dummy, &n) < 0) {
- PyErr_Clear();
- return NPY_FALSE;
- }
-#endif
return NPY_TRUE;
}
@@ -685,7 +648,7 @@ _IsWriteable(PyArrayObject *ap)
* @return Python unicode string
*/
NPY_NO_EXPORT PyObject *
-convert_shape_to_string(npy_intp n, npy_intp *vals, char *ending)
+convert_shape_to_string(npy_intp n, npy_intp const *vals, char *ending)
{
npy_intp i;
PyObject *ret, *tmp;
@@ -935,5 +898,3 @@ new_array_for_sum(PyArrayObject *ap1, PyArrayObject *ap2, PyArrayObject* out,
}
}
-
-
diff --git a/numpy/core/src/multiarray/common.h b/numpy/core/src/multiarray/common.h
index 487d530a1..4913eb202 100644
--- a/numpy/core/src/multiarray/common.h
+++ b/numpy/core/src/multiarray/common.h
@@ -49,7 +49,7 @@ NPY_NO_EXPORT PyArray_Descr *
_array_find_python_scalar_type(PyObject *op);
NPY_NO_EXPORT PyArray_Descr *
-_array_typedescr_fromstr(char *str);
+_array_typedescr_fromstr(char const *str);
NPY_NO_EXPORT char *
index2ptr(PyArrayObject *mp, npy_intp i);
@@ -61,7 +61,7 @@ NPY_NO_EXPORT npy_bool
_IsWriteable(PyArrayObject *ap);
NPY_NO_EXPORT PyObject *
-convert_shape_to_string(npy_intp n, npy_intp *vals, char *ending);
+convert_shape_to_string(npy_intp n, npy_intp const *vals, char *ending);
/*
* Sets ValueError with "matrices not aligned" message for np.dot and friends
@@ -235,7 +235,7 @@ npy_uint_alignment(int itemsize)
default:
break;
}
-
+
return alignment;
}
@@ -303,7 +303,11 @@ blas_stride(npy_intp stride, unsigned itemsize)
*/
if (stride > 0 && npy_is_aligned((void *)stride, itemsize)) {
stride /= itemsize;
+#ifndef HAVE_BLAS_ILP64
if (stride <= INT_MAX) {
+#else
+ if (stride <= NPY_MAX_INT64) {
+#endif
return stride;
}
}
@@ -314,7 +318,11 @@ blas_stride(npy_intp stride, unsigned itemsize)
* Define a chunksize for CBLAS. CBLAS counts in integers.
*/
#if NPY_MAX_INTP > INT_MAX
-# define NPY_CBLAS_CHUNK (INT_MAX / 2 + 1)
+# ifndef HAVE_BLAS_ILP64
+# define NPY_CBLAS_CHUNK (INT_MAX / 2 + 1)
+# else
+# define NPY_CBLAS_CHUNK (NPY_MAX_INT64 / 2 + 1)
+# endif
#else
# define NPY_CBLAS_CHUNK NPY_MAX_INTP
#endif
@@ -335,3 +343,4 @@ new_array_for_sum(PyArrayObject *ap1, PyArrayObject *ap2, PyArrayObject* out,
int nd, npy_intp dimensions[], int typenum, PyArrayObject **result);
#endif
+
diff --git a/numpy/core/src/multiarray/compiled_base.c b/numpy/core/src/multiarray/compiled_base.c
index 055d3e60f..308e72009 100644
--- a/numpy/core/src/multiarray/compiled_base.c
+++ b/numpy/core/src/multiarray/compiled_base.c
@@ -1159,12 +1159,12 @@ fail:
}
-/*
+/*
* Inner loop for unravel_index
* order must be NPY_CORDER or NPY_FORTRANORDER
*/
static int
-unravel_index_loop(int unravel_ndim, npy_intp *unravel_dims,
+unravel_index_loop(int unravel_ndim, npy_intp const *unravel_dims,
npy_intp unravel_size, npy_intp count,
char *indices, npy_intp indices_stride,
npy_intp *coords, NPY_ORDER order)
@@ -1186,7 +1186,7 @@ unravel_index_loop(int unravel_ndim, npy_intp *unravel_dims,
}
idx = idx_start;
for (i = 0; i < unravel_ndim; ++i) {
- /*
+ /*
* Using a local seems to enable single-divide optimization
* but only if the / precedes the %
*/
@@ -1242,15 +1242,25 @@ arr_unravel_index(PyObject *self, PyObject *args, PyObject *kwds)
*/
if (kwds) {
PyObject *dims_item, *shape_item;
- dims_item = PyDict_GetItemString(kwds, "dims");
- shape_item = PyDict_GetItemString(kwds, "shape");
+ dims_item = _PyDict_GetItemStringWithError(kwds, "dims");
+ if (dims_item == NULL && PyErr_Occurred()){
+ return NULL;
+ }
+ shape_item = _PyDict_GetItemStringWithError(kwds, "shape");
+ if (shape_item == NULL && PyErr_Occurred()){
+ return NULL;
+ }
if (dims_item != NULL && shape_item == NULL) {
if (DEPRECATE("'shape' argument should be"
" used instead of 'dims'") < 0) {
return NULL;
}
- PyDict_SetItemString(kwds, "shape", dims_item);
- PyDict_DelItemString(kwds, "dims");
+ if (PyDict_SetItemString(kwds, "shape", dims_item) < 0) {
+ return NULL;
+ }
+ if (PyDict_DelItemString(kwds, "dims") < 0) {
+ return NULL;
+ }
}
}
@@ -1429,25 +1439,33 @@ arr_add_docstring(PyObject *NPY_UNUSED(dummy), PyObject *args)
if (PyGetSetDescr_TypePtr == NULL) {
/* Get "subdescr" */
- myobj = PyDict_GetItemString(tp_dict, "fields");
+ myobj = _PyDict_GetItemStringWithError(tp_dict, "fields");
+ if (myobj == NULL && PyErr_Occurred()) {
+ return NULL;
+ }
if (myobj != NULL) {
PyGetSetDescr_TypePtr = Py_TYPE(myobj);
}
}
if (PyMemberDescr_TypePtr == NULL) {
- myobj = PyDict_GetItemString(tp_dict, "alignment");
+ myobj = _PyDict_GetItemStringWithError(tp_dict, "alignment");
+ if (myobj == NULL && PyErr_Occurred()) {
+ return NULL;
+ }
if (myobj != NULL) {
PyMemberDescr_TypePtr = Py_TYPE(myobj);
}
}
if (PyMethodDescr_TypePtr == NULL) {
- myobj = PyDict_GetItemString(tp_dict, "newbyteorder");
+ myobj = _PyDict_GetItemStringWithError(tp_dict, "newbyteorder");
+ if (myobj == NULL && PyErr_Occurred()) {
+ return NULL;
+ }
if (myobj != NULL) {
PyMethodDescr_TypePtr = Py_TYPE(myobj);
}
}
-#if defined(NPY_PY3K)
if (!PyArg_ParseTuple(args, "OO!:add_docstring", &obj, &PyUnicode_Type, &str)) {
return NULL;
}
@@ -1456,13 +1474,6 @@ arr_add_docstring(PyObject *NPY_UNUSED(dummy), PyObject *args)
if (docstr == NULL) {
return NULL;
}
-#else
- if (!PyArg_ParseTuple(args, "OO!:add_docstring", &obj, &PyString_Type, &str)) {
- return NULL;
- }
-
- docstr = PyString_AS_STRING(str);
-#endif
#define _TESTDOC1(typebase) (Py_TYPE(obj) == &Py##typebase##_Type)
#define _TESTDOC2(typebase) (Py_TYPE(obj) == Py##typebase##_TypePtr)
diff --git a/numpy/core/src/multiarray/conversion_utils.c b/numpy/core/src/multiarray/conversion_utils.c
index 4baa02052..260ae7080 100644
--- a/numpy/core/src/multiarray/conversion_utils.c
+++ b/numpy/core/src/multiarray/conversion_utils.c
@@ -16,7 +16,7 @@
#include "conversion_utils.h"
#include "alloc.h"
-#include "buffer.h"
+#include "npy_buffer.h"
static int
PyArray_PyIntAsInt_ErrMsg(PyObject *o, const char * msg) NPY_GCC_NONNULL(2);
@@ -137,6 +137,20 @@ PyArray_IntpConverter(PyObject *obj, PyArray_Dims *seq)
return NPY_SUCCEED;
}
+/*
+ * Like PyArray_IntpConverter, but leaves `seq` untouched if `None` is passed
+ * rather than treating `None` as `()`.
+ */
+NPY_NO_EXPORT int
+PyArray_OptionalIntpConverter(PyObject *obj, PyArray_Dims *seq)
+{
+ if (obj == Py_None) {
+ return NPY_SUCCEED;
+ }
+
+ return PyArray_IntpConverter(obj, seq);
+}
+
/*NUMPY_API
* Get buffer chunk from object
*
@@ -152,11 +166,7 @@ PyArray_IntpConverter(PyObject *obj, PyArray_Dims *seq)
NPY_NO_EXPORT int
PyArray_BufferConverter(PyObject *obj, PyArray_Chunk *buf)
{
-#if defined(NPY_PY3K)
Py_buffer view;
-#else
- Py_ssize_t buflen;
-#endif
buf->ptr = NULL;
buf->flags = NPY_ARRAY_BEHAVED;
@@ -165,7 +175,6 @@ PyArray_BufferConverter(PyObject *obj, PyArray_Chunk *buf)
return NPY_SUCCEED;
}
-#if defined(NPY_PY3K)
if (PyObject_GetBuffer(obj, &view,
PyBUF_ANY_CONTIGUOUS|PyBUF_WRITABLE|PyBUF_SIMPLE) != 0) {
PyErr_Clear();
@@ -192,22 +201,6 @@ PyArray_BufferConverter(PyObject *obj, PyArray_Chunk *buf)
if (PyMemoryView_Check(obj)) {
buf->base = PyMemoryView_GET_BASE(obj);
}
-#else
- if (PyObject_AsWriteBuffer(obj, &(buf->ptr), &buflen) < 0) {
- PyErr_Clear();
- buf->flags &= ~NPY_ARRAY_WRITEABLE;
- if (PyObject_AsReadBuffer(obj, (const void **)&(buf->ptr),
- &buflen) < 0) {
- return NPY_FAIL;
- }
- }
- buf->len = (npy_intp) buflen;
-
- /* Point to the base of the buffer object if present */
- if (PyBuffer_Check(obj)) {
- buf->base = ((PyArray_Chunk *)obj)->base;
- }
-#endif
if (buf->base == NULL) {
buf->base = obj;
}
@@ -667,8 +660,8 @@ PyArray_ConvertClipmodeSequence(PyObject *object, NPY_CLIPMODE *modes, int n)
if (object && (PyTuple_Check(object) || PyList_Check(object))) {
if (PySequence_Size(object) != n) {
PyErr_Format(PyExc_ValueError,
- "list of clipmodes has wrong length (%d instead of %d)",
- (int)PySequence_Size(object), n);
+ "list of clipmodes has wrong length (%zd instead of %d)",
+ PySequence_Size(object), n);
return NPY_FAIL;
}
@@ -812,18 +805,6 @@ PyArray_PyIntAsIntp_ErrMsg(PyObject *o, const char * msg)
* Since it is the usual case, first check if o is an integer. This is
* an exact check, since otherwise __index__ is used.
*/
-#if !defined(NPY_PY3K)
- if (PyInt_CheckExact(o)) {
- #if (NPY_SIZEOF_LONG <= NPY_SIZEOF_INTP)
- /* No overflow is possible, so we can just return */
- return PyInt_AS_LONG(o);
- #else
- long_value = PyInt_AS_LONG(o);
- goto overflow_check;
- #endif
- }
- else
-#endif
if (PyLong_CheckExact(o)) {
#if (NPY_SIZEOF_LONG < NPY_SIZEOF_INTP)
long_value = PyLong_AsLongLong(o);
@@ -1145,7 +1126,7 @@ PyArray_TypestrConvert(int itemsize, int gentype)
PyArray_IntTupleFromIntp
*/
NPY_NO_EXPORT PyObject *
-PyArray_IntTupleFromIntp(int len, npy_intp *vals)
+PyArray_IntTupleFromIntp(int len, npy_intp const *vals)
{
int i;
PyObject *intTuple = PyTuple_New(len);
diff --git a/numpy/core/src/multiarray/conversion_utils.h b/numpy/core/src/multiarray/conversion_utils.h
index cd43f25c3..bee0c6064 100644
--- a/numpy/core/src/multiarray/conversion_utils.h
+++ b/numpy/core/src/multiarray/conversion_utils.h
@@ -7,6 +7,9 @@ NPY_NO_EXPORT int
PyArray_IntpConverter(PyObject *obj, PyArray_Dims *seq);
NPY_NO_EXPORT int
+PyArray_OptionalIntpConverter(PyObject *obj, PyArray_Dims *seq);
+
+NPY_NO_EXPORT int
PyArray_BufferConverter(PyObject *obj, PyArray_Chunk *buf);
NPY_NO_EXPORT int
@@ -37,7 +40,7 @@ NPY_NO_EXPORT int
PyArray_TypestrConvert(int itemsize, int gentype);
NPY_NO_EXPORT PyObject *
-PyArray_IntTupleFromIntp(int len, npy_intp *vals);
+PyArray_IntTupleFromIntp(int len, npy_intp const *vals);
NPY_NO_EXPORT int
PyArray_SelectkindConverter(PyObject *obj, NPY_SELECTKIND *selectkind);
diff --git a/numpy/core/src/multiarray/convert.c b/numpy/core/src/multiarray/convert.c
index aa4e40e66..e7cbeaa77 100644
--- a/numpy/core/src/multiarray/convert.c
+++ b/numpy/core/src/multiarray/convert.c
@@ -262,18 +262,12 @@ PyArray_ToFile(PyArrayObject *self, FILE *fp, char *sep, char *format)
return -1;
}
}
-#if defined(NPY_PY3K)
byteobj = PyUnicode_AsASCIIString(strobj);
-#else
- byteobj = strobj;
-#endif
NPY_BEGIN_ALLOW_THREADS;
n2 = PyBytes_GET_SIZE(byteobj);
n = fwrite(PyBytes_AS_STRING(byteobj), 1, n2, fp);
NPY_END_ALLOW_THREADS;
-#if defined(NPY_PY3K)
Py_DECREF(byteobj);
-#endif
if (n < n2) {
PyErr_Format(PyExc_IOError,
"problem writing element %" NPY_INTP_FMT
diff --git a/numpy/core/src/multiarray/convert_datatype.c b/numpy/core/src/multiarray/convert_datatype.c
index 025c66013..d59a62ed8 100644
--- a/numpy/core/src/multiarray/convert_datatype.c
+++ b/numpy/core/src/multiarray/convert_datatype.c
@@ -13,6 +13,7 @@
#include "numpy/npy_math.h"
#include "common.h"
+#include "ctors.h"
#include "scalartypes.h"
#include "mapping.h"
@@ -212,13 +213,17 @@ PyArray_AdaptFlexibleDType(PyObject *data_obj, PyArray_Descr *data_dtype,
case NPY_HALF:
case NPY_FLOAT:
case NPY_DOUBLE:
- case NPY_LONGDOUBLE:
size = 32;
break;
+ case NPY_LONGDOUBLE:
+ size = 48;
+ break;
case NPY_CFLOAT:
case NPY_CDOUBLE:
+ size = 2 * 32;
+ break;
case NPY_CLONGDOUBLE:
- size = 64;
+ size = 2 * 48;
break;
case NPY_OBJECT:
size = 64;
@@ -255,11 +260,11 @@ PyArray_AdaptFlexibleDType(PyObject *data_obj, PyArray_Descr *data_dtype,
int ndim = 0;
npy_intp dims[NPY_MAXDIMS];
list = PyArray_ToList((PyArrayObject *)data_obj);
- result = PyArray_GetArrayParamsFromObject(
+ result = PyArray_GetArrayParamsFromObject_int(
list,
retval,
0, &dtype,
- &ndim, dims, &arr, NULL);
+ &ndim, dims, &arr);
Py_DECREF(list);
Py_XDECREF(arr);
if (result < 0) {
@@ -877,7 +882,13 @@ PyArray_CanCastTypeTo(PyArray_Descr *from, PyArray_Descr *to,
from_order = dtype_kind_to_ordering(from->kind);
to_order = dtype_kind_to_ordering(to->kind);
- return from_order != -1 && from_order <= to_order;
+ if (to->kind == 'm') {
+ /* both types being timedelta is already handled before. */
+ int integer_order = dtype_kind_to_ordering('i');
+ return (from_order != -1) && (from_order <= integer_order);
+ }
+
+ return (from_order != -1) && (from_order <= to_order);
}
else {
return 0;
@@ -2115,15 +2126,19 @@ PyArray_ObjectType(PyObject *op, int minimum_type)
/* Raises error when len(op) == 0 */
-/*NUMPY_API*/
+/*NUMPY_API
+ *
+ * This function is only used in one place within NumPy and should
+ * generally be avoided. It is provided mainly for backward compatibility.
+ *
+ * The user of the function has to free the returned array.
+ */
NPY_NO_EXPORT PyArrayObject **
PyArray_ConvertToCommonType(PyObject *op, int *retn)
{
- int i, n, allscalars = 0;
+ int i, n;
+ PyArray_Descr *common_descr = NULL;
PyArrayObject **mps = NULL;
- PyArray_Descr *intype = NULL, *stype = NULL;
- PyArray_Descr *newtype = NULL;
- NPY_SCALARKIND scalarkind = NPY_NOSCALAR, intypekind = NPY_NOSCALAR;
*retn = n = PySequence_Length(op);
if (n == 0) {
@@ -2159,94 +2174,41 @@ PyArray_ConvertToCommonType(PyObject *op, int *retn)
}
for (i = 0; i < n; i++) {
- PyObject *otmp = PySequence_GetItem(op, i);
- if (otmp == NULL) {
+ /* Convert everything to an array, this could be optimized away */
+ PyObject *tmp = PySequence_GetItem(op, i);
+ if (tmp == NULL) {
goto fail;
}
- if (!PyArray_CheckAnyScalar(otmp)) {
- newtype = PyArray_DescrFromObject(otmp, intype);
- Py_DECREF(otmp);
- Py_XDECREF(intype);
- if (newtype == NULL) {
- goto fail;
- }
- intype = newtype;
- intypekind = PyArray_ScalarKind(intype->type_num, NULL);
- }
- else {
- newtype = PyArray_DescrFromObject(otmp, stype);
- Py_DECREF(otmp);
- Py_XDECREF(stype);
- if (newtype == NULL) {
- goto fail;
- }
- stype = newtype;
- scalarkind = PyArray_ScalarKind(newtype->type_num, NULL);
- mps[i] = (PyArrayObject *)Py_None;
- Py_INCREF(Py_None);
- }
- }
- if (intype == NULL) {
- /* all scalars */
- allscalars = 1;
- intype = stype;
- Py_INCREF(intype);
- for (i = 0; i < n; i++) {
- Py_XDECREF(mps[i]);
- mps[i] = NULL;
- }
- }
- else if ((stype != NULL) && (intypekind != scalarkind)) {
- /*
- * we need to upconvert to type that
- * handles both intype and stype
- * also don't forcecast the scalars.
- */
- if (!PyArray_CanCoerceScalar(stype->type_num,
- intype->type_num,
- scalarkind)) {
- newtype = PyArray_PromoteTypes(intype, stype);
- Py_XDECREF(intype);
- intype = newtype;
- if (newtype == NULL) {
- goto fail;
- }
- }
- for (i = 0; i < n; i++) {
- Py_XDECREF(mps[i]);
- mps[i] = NULL;
+
+ mps[i] = (PyArrayObject *)PyArray_FROM_O(tmp);
+ Py_DECREF(tmp);
+ if (mps[i] == NULL) {
+ goto fail;
}
}
+ common_descr = PyArray_ResultType(n, mps, 0, NULL);
+ if (common_descr == NULL) {
+ goto fail;
+ }
- /* Make sure all arrays are actual array objects. */
+ /* Make sure all arrays are contiguous and have the correct dtype. */
for (i = 0; i < n; i++) {
int flags = NPY_ARRAY_CARRAY;
- PyObject *otmp = PySequence_GetItem(op, i);
+ PyArrayObject *tmp = mps[i];
- if (otmp == NULL) {
- goto fail;
- }
- if (!allscalars && ((PyObject *)(mps[i]) == Py_None)) {
- /* forcecast scalars */
- flags |= NPY_ARRAY_FORCECAST;
- Py_DECREF(Py_None);
- }
- Py_INCREF(intype);
- mps[i] = (PyArrayObject*)PyArray_FromAny(otmp, intype, 0, 0,
- flags, NULL);
- Py_DECREF(otmp);
+ Py_INCREF(common_descr);
+ mps[i] = (PyArrayObject *)PyArray_FromArray(tmp, common_descr, flags);
+ Py_DECREF(tmp);
if (mps[i] == NULL) {
goto fail;
}
}
- Py_DECREF(intype);
- Py_XDECREF(stype);
+ Py_DECREF(common_descr);
return mps;
fail:
- Py_XDECREF(intype);
- Py_XDECREF(stype);
+ Py_XDECREF(common_descr);
*retn = 0;
for (i = 0; i < n; i++) {
Py_XDECREF(mps[i]);
diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c
index 5174bd889..12bf9eace 100644
--- a/numpy/core/src/multiarray/ctors.c
+++ b/numpy/core/src/multiarray/ctors.c
@@ -19,7 +19,7 @@
#include "ctors.h"
#include "convert_datatype.h"
#include "shape.h"
-#include "buffer.h"
+#include "npy_buffer.h"
#include "lowlevel_strided_loops.h"
#include "methods.h"
#include "_datetime.h"
@@ -41,7 +41,7 @@
*/
/*
- * Scanning function for next element parsing and seperator skipping.
+ * Scanning function for next element parsing and separator skipping.
* These functions return:
* - 0 to indicate more data to read
* - -1 when reading stopped at the end of the string/file
@@ -53,6 +53,9 @@
typedef int (*next_element)(void **, void *, PyArray_Descr *, void *);
typedef int (*skip_separator)(void **, const char *, void *);
+static PyObject *
+_array_from_array_like(PyObject *op,
+ PyArray_Descr *requested_dtype, npy_bool writeable, PyObject *context);
static npy_bool
string_is_fully_read(char const* start, char const* end) {
@@ -453,10 +456,6 @@ copy_and_swap(void *dst, void *src, int itemsize, npy_intp numitems,
}
}
-NPY_NO_EXPORT PyObject *
-_array_from_array_like(PyObject *op, PyArray_Descr *requested_dtype,
- npy_bool writeable, PyObject *context);
-
/*
* adapted from Numarray,
* a: destination array
@@ -477,6 +476,11 @@ setArrayFromSequence(PyArrayObject *a, PyObject *s,
if (dst == NULL)
dst = a;
+ /*
+ * This code is to ensure that the sequence access below will
+ * return a lower-dimensional sequence.
+ */
+
/* INCREF on entry DECREF on exit */
Py_INCREF(s);
@@ -502,40 +506,33 @@ setArrayFromSequence(PyArrayObject *a, PyObject *s,
return 0;
}
- /*
- * This code is to ensure that the sequence access below will
- * return a lower-dimensional sequence.
- */
-
if (dim > PyArray_NDIM(a)) {
PyErr_Format(PyExc_ValueError,
"setArrayFromSequence: sequence/array dimensions mismatch.");
goto fail;
}
- slen = PySequence_Length(s);
- if (slen < 0) {
+ /* Try __array__ before using s as a sequence */
+ PyObject *tmp = _array_from_array_like(s, NULL, 0, NULL);
+ if (tmp == NULL) {
goto fail;
}
- if (slen > 0) {
- /* gh-13659: try __array__ before using s as a sequence */
- PyObject *tmp = _array_from_array_like(s, /*dtype*/NULL, /*writeable*/0,
- /*context*/NULL);
- if (tmp == NULL) {
+ else if (tmp == Py_NotImplemented) {
+ Py_DECREF(tmp);
+ }
+ else {
+ int r = PyArray_CopyInto(dst, (PyArrayObject *)tmp);
+ Py_DECREF(tmp);
+ if (r < 0) {
goto fail;
}
- else if (tmp == Py_NotImplemented) {
- Py_DECREF(tmp);
- }
- else {
- int r = PyArray_CopyInto(dst, (PyArrayObject *)tmp);
- Py_DECREF(tmp);
- if (r < 0) {
- goto fail;
- }
- Py_DECREF(s);
- return 0;
- }
+ Py_DECREF(s);
+ return 0;
+ }
+
+ slen = PySequence_Length(s);
+ if (slen < 0) {
+ goto fail;
}
/*
@@ -544,8 +541,8 @@ setArrayFromSequence(PyArrayObject *a, PyObject *s,
*/
if (slen != PyArray_DIMS(a)[dim] && slen != 1) {
PyErr_Format(PyExc_ValueError,
- "cannot copy sequence with size %d to array axis "
- "with dimension %d", (int)slen, (int)PyArray_DIMS(a)[dim]);
+ "cannot copy sequence with size %zd to array axis "
+ "with dimension %" NPY_INTP_FMT, slen, PyArray_DIMS(a)[dim]);
goto fail;
}
@@ -654,13 +651,7 @@ discover_itemsize(PyObject *s, int nd, int *itemsize, int string_type)
}
if ((nd == 0) || PyString_Check(s) ||
-#if defined(NPY_PY3K)
- PyMemoryView_Check(s) ||
-#else
- PyBuffer_Check(s) ||
-#endif
- PyUnicode_Check(s)) {
-
+ PyMemoryView_Check(s) || PyUnicode_Check(s)) {
/* If an object has no length, leave it be */
if (string_type && s != NULL &&
!PyString_Check(s) && !PyUnicode_Check(s)) {
@@ -669,11 +660,7 @@ discover_itemsize(PyObject *s, int nd, int *itemsize, int string_type)
s_string = PyObject_Str(s);
}
else {
-#if defined(NPY_PY3K)
s_string = PyObject_Str(s);
-#else
- s_string = PyObject_Unicode(s);
-#endif
}
if (s_string) {
n = PyObject_Length(s_string);
@@ -713,6 +700,25 @@ discover_itemsize(PyObject *s, int nd, int *itemsize, int string_type)
return 0;
}
+
+typedef enum {
+ DISCOVERED_OK = 0,
+ DISCOVERED_RAGGED = 1,
+ DISCOVERED_OBJECT = 2
+} discovered_t;
+
+
+static void
+_discover_dimensions_array(PyArrayObject *arr, int *maxndim, npy_intp *d) {
+ if (PyArray_NDIM(arr) < *maxndim) {
+ *maxndim = PyArray_NDIM(arr);
+ }
+ for (int i = 0; i < *maxndim; i++) {
+ d[i] = PyArray_DIM(arr, i);
+ }
+}
+
+
/*
* Take an arbitrary object and discover how many dimensions it
* has, filling in the dimensions as we go.
@@ -720,11 +726,10 @@ discover_itemsize(PyObject *s, int nd, int *itemsize, int string_type)
static int
discover_dimensions(PyObject *obj, int *maxndim, npy_intp *d, int check_it,
int stop_at_string, int stop_at_tuple,
- int *out_is_object)
+ discovered_t *out_is_object)
{
PyObject *e;
npy_intp n, i;
- Py_buffer buffer_view;
PyObject * seq;
if (*maxndim == 0) {
@@ -733,15 +738,7 @@ discover_dimensions(PyObject *obj, int *maxndim, npy_intp *d, int check_it,
/* obj is an Array */
if (PyArray_Check(obj)) {
- PyArrayObject *arr = (PyArrayObject *)obj;
-
- if (PyArray_NDIM(arr) < *maxndim) {
- *maxndim = PyArray_NDIM(arr);
- }
-
- for (i=0; i<*maxndim; i++) {
- d[i] = PyArray_DIM(arr,i);
- }
+ _discover_dimensions_array((PyArrayObject *)obj, maxndim, d);
return 0;
}
@@ -761,10 +758,6 @@ discover_dimensions(PyObject *obj, int *maxndim, npy_intp *d, int check_it,
/* obj is a String */
if (PyString_Check(obj) ||
-#if defined(NPY_PY3K)
-#else
- PyBuffer_Check(obj) ||
-#endif
PyUnicode_Check(obj)) {
if (stop_at_string) {
*maxndim = 0;
@@ -782,104 +775,24 @@ discover_dimensions(PyObject *obj, int *maxndim, npy_intp *d, int check_it,
return 0;
}
- /* obj is a PEP 3118 buffer */
- /* PEP 3118 buffer interface */
- if (PyObject_CheckBuffer(obj) == 1) {
- memset(&buffer_view, 0, sizeof(Py_buffer));
- if (PyObject_GetBuffer(obj, &buffer_view,
- PyBUF_STRIDES|PyBUF_SIMPLE) == 0 ||
- PyObject_GetBuffer(obj, &buffer_view,
- PyBUF_ND|PyBUF_SIMPLE) == 0) {
- int nd = buffer_view.ndim;
-
- if (nd < *maxndim) {
- *maxndim = nd;
- }
- for (i = 0; i < *maxndim; i++) {
- d[i] = buffer_view.shape[i];
- }
- PyBuffer_Release(&buffer_view);
- _dealloc_cached_buffer_info(obj);
- return 0;
- }
- else if (PyErr_Occurred()) {
- if (PyErr_ExceptionMatches(PyExc_BufferError) ||
- PyErr_ExceptionMatches(PyExc_TypeError)) {
- PyErr_Clear();
- } else {
- return -1;
- }
- }
- else if (PyObject_GetBuffer(obj, &buffer_view, PyBUF_SIMPLE) == 0) {
- d[0] = buffer_view.len;
- *maxndim = 1;
- PyBuffer_Release(&buffer_view);
- _dealloc_cached_buffer_info(obj);
- return 0;
- }
- else if (PyErr_Occurred()) {
- if (PyErr_ExceptionMatches(PyExc_BufferError) ||
- PyErr_ExceptionMatches(PyExc_TypeError)) {
- PyErr_Clear();
- } else {
- return -1;
- }
- }
- }
-
- /* obj has the __array_struct__ interface */
- e = PyArray_LookupSpecial_OnInstance(obj, "__array_struct__");
- if (e != NULL) {
- int nd = -1;
-
- if (NpyCapsule_Check(e)) {
- PyArrayInterface *inter;
- inter = (PyArrayInterface *)NpyCapsule_AsVoidPtr(e);
- if (inter->two == 2) {
- nd = inter->nd;
- if (nd >= 0) {
- if (nd < *maxndim) {
- *maxndim = nd;
- }
- for (i=0; i<*maxndim; i++) {
- d[i] = inter->shape[i];
- }
- }
- }
- }
+ /*
+ * In the future, the result of `_array_from_array_like` should possibly
+ * be cached. This may require passing the correct dtype/writable
+ * information already in the dimension discovery step (if they are
+ * distinct steps).
+ */
+ e = _array_from_array_like(obj, NULL, NPY_FALSE, NULL);
+ if (e == Py_NotImplemented) {
Py_DECREF(e);
- if (nd >= 0) {
- return 0;
- }
}
-
- /* obj has the __array_interface__ interface */
- e = PyArray_LookupSpecial_OnInstance(obj, "__array_interface__");
- if (e != NULL) {
- int nd = -1;
- if (PyDict_Check(e)) {
- PyObject *new;
- new = PyDict_GetItemString(e, "shape");
- if (new && PyTuple_Check(new)) {
- nd = PyTuple_GET_SIZE(new);
- if (nd < *maxndim) {
- *maxndim = nd;
- }
- for (i=0; i<*maxndim; i++) {
- d[i] = PyInt_AsSsize_t(PyTuple_GET_ITEM(new, i));
- if (d[i] < 0) {
- PyErr_SetString(PyExc_RuntimeError,
- "Invalid shape in __array_interface__");
- Py_DECREF(e);
- return -1;
- }
- }
- }
- }
+ else if (e != NULL) {
+ _discover_dimensions_array((PyArrayObject *)e, maxndim, d);
Py_DECREF(e);
- if (nd >= 0) {
- return 0;
- }
+ return 0;
+ }
+ else if (PyErr_Occurred()) {
+ /* TODO[gh-14801]: propagate crashes during attribute access? */
+ PyErr_Clear();
}
seq = PySequence_Fast(obj, "Could not convert object to sequence");
@@ -899,7 +812,7 @@ discover_dimensions(PyObject *obj, int *maxndim, npy_intp *d, int check_it,
if (PyErr_ExceptionMatches(PyExc_KeyError)) {
PyErr_Clear();
*maxndim = 0;
- *out_is_object = 1;
+ *out_is_object = DISCOVERED_OBJECT;
return 0;
}
else {
@@ -958,7 +871,7 @@ discover_dimensions(PyObject *obj, int *maxndim, npy_intp *d, int check_it,
*maxndim = all_elems_maxndim + 1;
if (!all_dimensions_match) {
/* typically results in an array containing variable-length lists */
- *out_is_object = 1;
+ *out_is_object = DISCOVERED_RAGGED;
}
}
@@ -968,7 +881,7 @@ discover_dimensions(PyObject *obj, int *maxndim, npy_intp *d, int check_it,
}
static PyObject *
-raise_memory_error(int nd, npy_intp *dims, PyArray_Descr *descr)
+raise_memory_error(int nd, npy_intp const *dims, PyArray_Descr *descr)
{
static PyObject *exc_type = NULL;
@@ -1298,8 +1211,8 @@ PyArray_NewFromDescrAndBase(
* NPY_ANYORDER - Fortran if prototype is Fortran, C otherwise.
* NPY_KEEPORDER - Keeps the axis ordering of prototype.
* dtype - If not NULL, overrides the data type of the result.
- * ndim - If not 0 and dims not NULL, overrides the shape of the result.
- * dims - If not NULL and ndim not 0, overrides the shape of the result.
+ * ndim - If not -1, overrides the shape of the result.
+ * dims - If ndim is not -1, overrides the shape of the result.
* subok - If 1, use the prototype's array subtype, otherwise
* always create a base-class array.
*
@@ -1312,7 +1225,7 @@ PyArray_NewLikeArrayWithShape(PyArrayObject *prototype, NPY_ORDER order,
{
PyObject *ret = NULL;
- if (dims == NULL) {
+ if (ndim == -1) {
ndim = PyArray_NDIM(prototype);
dims = PyArray_DIMS(prototype);
}
@@ -1409,7 +1322,7 @@ NPY_NO_EXPORT PyObject *
PyArray_NewLikeArray(PyArrayObject *prototype, NPY_ORDER order,
PyArray_Descr *dtype, int subok)
{
- return PyArray_NewLikeArrayWithShape(prototype, order, dtype, 0, NULL, subok);
+ return PyArray_NewLikeArrayWithShape(prototype, order, dtype, -1, NULL, subok);
}
/*NUMPY_API
@@ -1597,7 +1510,7 @@ fail:
}
-/*
+/**
* Attempts to extract an array from an array-like object.
*
* array-like is defined as either
@@ -1606,43 +1519,58 @@ fail:
* * an object with __array_struct__ or __array_interface__ attributes;
* * an object with an __array__ function.
*
- * Returns Py_NotImplemented if a given object is not array-like;
- * PyArrayObject* in case of success and NULL in case of failure.
+ * @param op The object to convert to an array
+ * @param requested_type a requested dtype instance, may be NULL; The result
+ * DType may be used, but is not enforced.
+ * @param writeable whether the result must be writeable.
+ * @param context Unused parameter, must be NULL (should be removed later).
+ *
+ * @returns The array object, Py_NotImplemented if op is not array-like,
+ * or NULL with an error set. (A new reference to Py_NotImplemented
+ * is returned.)
*/
-NPY_NO_EXPORT PyObject *
-_array_from_array_like(PyObject *op, PyArray_Descr *requested_dtype,
- npy_bool writeable, PyObject *context) {
+static PyObject *
+_array_from_array_like(PyObject *op,
+ PyArray_Descr *requested_dtype, npy_bool writeable, PyObject *context) {
PyObject* tmp;
- /* If op supports the PEP 3118 buffer interface */
+ /*
+ * If op supports the PEP 3118 buffer interface.
+ * We skip bytes and unicode since they are considered scalars. Unicode
+ * would fail but bytes would be incorrectly converted to a uint8 array.
+ */
if (!PyBytes_Check(op) && !PyUnicode_Check(op)) {
PyObject *memoryview = PyMemoryView_FromObject(op);
if (memoryview == NULL) {
PyErr_Clear();
}
else {
- tmp = _array_from_buffer_3118(memoryview);
- Py_DECREF(memoryview);
- if (tmp == NULL) {
- return NULL;
- }
+ tmp = _array_from_buffer_3118(memoryview);
+ Py_DECREF(memoryview);
+ if (tmp == NULL) {
+ return NULL;
+ }
- if (writeable
- && PyArray_FailUnlessWriteable((PyArrayObject *) tmp, "PEP 3118 buffer") < 0) {
- Py_DECREF(tmp);
- return NULL;
- }
+ if (writeable
+ && PyArray_FailUnlessWriteable(
+ (PyArrayObject *)tmp, "PEP 3118 buffer") < 0) {
+ Py_DECREF(tmp);
+ return NULL;
+ }
- return tmp;
+ return tmp;
}
}
- /* If op supports the __array_struct__ or __array_interface__ interface */
+ /*
+ * If op supports the __array_struct__ or __array_interface__ interface.
+ */
tmp = PyArray_FromStructInterface(op);
if (tmp == NULL) {
return NULL;
}
if (tmp == Py_NotImplemented) {
+        /* Until the return, NotImplemented is always a borrowed reference */
tmp = PyArray_FromInterface(op);
if (tmp == NULL) {
return NULL;
@@ -1666,21 +1594,22 @@ _array_from_array_like(PyObject *op, PyArray_Descr *requested_dtype,
}
if (tmp != Py_NotImplemented) {
- if (writeable
- && PyArray_FailUnlessWriteable((PyArrayObject *) tmp,
- "array interface object") < 0) {
+ if (writeable &&
+ PyArray_FailUnlessWriteable((PyArrayObject *)tmp,
+ "array interface object") < 0) {
Py_DECREF(tmp);
return NULL;
}
return tmp;
}
+ /* Until here Py_NotImplemented was borrowed */
Py_INCREF(Py_NotImplemented);
return Py_NotImplemented;
}
-/*NUMPY_API
+/*
* Retrieves the array parameters for viewing/converting an arbitrary
* PyObject* to a NumPy array. This allows the "innate type and shape"
* of Python list-of-lists to be discovered without
@@ -1737,12 +1666,12 @@ _array_from_array_like(PyObject *op, PyArray_Descr *requested_dtype,
* ... use arr ...
*/
NPY_NO_EXPORT int
-PyArray_GetArrayParamsFromObject(PyObject *op,
+PyArray_GetArrayParamsFromObject_int(PyObject *op,
PyArray_Descr *requested_dtype,
npy_bool writeable,
PyArray_Descr **out_dtype,
int *out_ndim, npy_intp *out_dims,
- PyArrayObject **out_arr, PyObject *context)
+ PyArrayObject **out_arr)
{
PyObject *tmp;
@@ -1788,7 +1717,7 @@ PyArray_GetArrayParamsFromObject(PyObject *op,
}
/* If op is an array-like */
- tmp = _array_from_array_like(op, requested_dtype, writeable, context);
+ tmp = _array_from_array_like(op, requested_dtype, writeable, NULL);
if (tmp == NULL) {
return -1;
}
@@ -1800,9 +1729,9 @@ PyArray_GetArrayParamsFromObject(PyObject *op,
Py_DECREF(Py_NotImplemented);
}
- /* Try to treat op as a list of lists or array-like objects. */
+ /* Try to treat op as a list of lists */
if (!writeable && PySequence_Check(op)) {
- int check_it, stop_at_string, stop_at_tuple, is_object;
+ int check_it, stop_at_string, stop_at_tuple;
int type_num, type;
/*
@@ -1852,7 +1781,7 @@ PyArray_GetArrayParamsFromObject(PyObject *op,
((*out_dtype)->names || (*out_dtype)->subarray));
*out_ndim = NPY_MAXDIMS;
- is_object = 0;
+ discovered_t is_object = DISCOVERED_OK;
if (discover_dimensions(
op, out_ndim, out_dims, check_it,
stop_at_string, stop_at_tuple, &is_object) < 0) {
@@ -1869,7 +1798,27 @@ PyArray_GetArrayParamsFromObject(PyObject *op,
return 0;
}
/* If object arrays are forced */
- if (is_object) {
+ if (is_object != DISCOVERED_OK) {
+ static PyObject *visibleDeprecationWarning = NULL;
+ npy_cache_import(
+ "numpy", "VisibleDeprecationWarning",
+ &visibleDeprecationWarning);
+ if (visibleDeprecationWarning == NULL) {
+ return -1;
+ }
+ if (is_object == DISCOVERED_RAGGED && requested_dtype == NULL) {
+ /* NumPy 1.19, 2019-11-01 */
+ if (PyErr_WarnEx(visibleDeprecationWarning, "Creating an "
+ "ndarray from ragged nested sequences (which is a "
+ "list-or-tuple of lists-or-tuples-or ndarrays with "
+ "different lengths or shapes) is deprecated. If you "
+ "meant to do this, you must specify 'dtype=object' "
+ "when creating the ndarray", 1) < 0)
+ {
+ return -1;
+ }
+ }
+ /* either DISCOVERED_OBJECT or there is a requested_dtype */
Py_DECREF(*out_dtype);
*out_dtype = PyArray_DescrFromType(NPY_OBJECT);
if (*out_dtype == NULL) {
@@ -1935,6 +1884,38 @@ PyArray_GetArrayParamsFromObject(PyObject *op,
return -1;
}
+
+/*NUMPY_API*/
+NPY_NO_EXPORT int
+PyArray_GetArrayParamsFromObject(PyObject *op,
+ PyArray_Descr *requested_dtype,
+ npy_bool writeable,
+ PyArray_Descr **out_dtype,
+ int *out_ndim, npy_intp *out_dims,
+ PyArrayObject **out_arr, PyObject *context)
+{
+ /* NumPy 1.19, 2020-01-24 */
+ if (DEPRECATE(
+ "PyArray_GetArrayParamsFromObject() C-API function is deprecated "
+ "and expected to be removed rapidly. If you are using it (i.e. see "
+ "this warning/error), please notify the NumPy developers. "
+ "As of now it is expected that any use case is served similarly "
+ "well by `PyArray_FromAny()` and this function is unused outside "
+ "of NumPy itself.") < 0) {
+ return -1;
+ }
+
+ if (context != NULL) {
+ PyErr_SetString(PyExc_RuntimeError, "'context' must be NULL");
+ return -1;
+ }
+
+ return PyArray_GetArrayParamsFromObject_int(op,
+ requested_dtype, writeable, out_dtype, out_ndim, out_dims,
+ out_arr);
+}
+
+
/*NUMPY_API
* Does not check for NPY_ARRAY_ENSURECOPY and NPY_ARRAY_NOTSWAPPED in flags
* Steals a reference to newtype --- which can be NULL
@@ -1952,17 +1933,21 @@ PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth,
int ndim = 0;
npy_intp dims[NPY_MAXDIMS];
+ if (context != NULL) {
+ PyErr_SetString(PyExc_RuntimeError, "'context' must be NULL");
+ return NULL;
+ }
+
/* Get either the array or its parameters if it isn't an array */
- if (PyArray_GetArrayParamsFromObject(op, newtype,
- 0, &dtype,
- &ndim, dims, &arr, context) < 0) {
+ if (PyArray_GetArrayParamsFromObject_int(op,
+ newtype, 0, &dtype, &ndim, dims, &arr) < 0) {
Py_XDECREF(newtype);
return NULL;
}
/* If the requested dtype is flexible, adapt it */
if (newtype != NULL) {
- newtype = PyArray_AdaptFlexibleDType(op,
+ newtype = PyArray_AdaptFlexibleDType((arr == NULL) ? op : (PyObject *)arr,
(dtype == NULL) ? PyArray_DESCR(arr) : dtype,
newtype);
if (newtype == NULL) {
@@ -2116,6 +2101,8 @@ PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth,
*
* NPY_ARRAY_FORCECAST will cause a cast to occur regardless of whether or not
* it is safe.
+ *
+ * context must be NULL; the argument is deprecated and no longer forwarded
*/
/*NUMPY_API
@@ -2351,7 +2338,11 @@ PyArray_FromStructInterface(PyObject *input)
attr = PyArray_LookupSpecial_OnInstance(input, "__array_struct__");
if (attr == NULL) {
- return Py_NotImplemented;
+ if (PyErr_Occurred()) {
+ return NULL;
+ } else {
+ return Py_NotImplemented;
+ }
}
if (!NpyCapsule_Check(attr)) {
goto fail;
@@ -2401,9 +2392,7 @@ PyArray_FromStructInterface(PyObject *input)
NPY_NO_EXPORT int
_is_default_descr(PyObject *descr, PyObject *typestr) {
PyObject *tuple, *name, *typestr2;
-#if defined(NPY_PY3K)
PyObject *tmp = NULL;
-#endif
int ret = 0;
if (!PyList_Check(descr) || PyList_GET_SIZE(descr) != 1) {
@@ -2418,7 +2407,6 @@ _is_default_descr(PyObject *descr, PyObject *typestr) {
return 0;
}
typestr2 = PyTuple_GET_ITEM(tuple, 1);
-#if defined(NPY_PY3K)
/* Allow unicode type strings */
if (PyUnicode_Check(typestr2)) {
tmp = PyUnicode_AsASCIIString(typestr2);
@@ -2427,14 +2415,11 @@ _is_default_descr(PyObject *descr, PyObject *typestr) {
}
typestr2 = tmp;
}
-#endif
if (PyBytes_Check(typestr2) &&
PyObject_RichCompareBool(typestr, typestr2, Py_EQ)) {
ret = 1;
}
-#if defined(NPY_PY3K)
Py_XDECREF(tmp);
-#endif
return ret;
}
@@ -2451,11 +2436,7 @@ PyArray_FromInterface(PyObject *origin)
PyArrayObject *ret;
PyArray_Descr *dtype = NULL;
char *data = NULL;
-#if defined(NPY_PY3K)
Py_buffer view;
-#else
- Py_ssize_t buffer_len;
-#endif
int res, i, n;
npy_intp dims[NPY_MAXDIMS], strides[NPY_MAXDIMS];
int dataflags = NPY_ARRAY_BEHAVED;
@@ -2463,6 +2444,9 @@ PyArray_FromInterface(PyObject *origin)
iface = PyArray_LookupSpecial_OnInstance(origin,
"__array_interface__");
if (iface == NULL) {
+ if (PyErr_Occurred()) {
+ PyErr_Clear(); /* TODO[gh-14801]: propagate crashes during attribute access? */
+ }
return Py_NotImplemented;
}
if (!PyDict_Check(iface)) {
@@ -2473,14 +2457,16 @@ PyArray_FromInterface(PyObject *origin)
}
/* Get type string from interface specification */
- attr = PyDict_GetItemString(iface, "typestr");
+ attr = _PyDict_GetItemStringWithError(iface, "typestr");
if (attr == NULL) {
Py_DECREF(iface);
- PyErr_SetString(PyExc_ValueError,
- "Missing __array_interface__ typestr");
+ if (!PyErr_Occurred()) {
+ PyErr_SetString(PyExc_ValueError,
+ "Missing __array_interface__ typestr");
+ }
return NULL;
}
-#if defined(NPY_PY3K)
+
/* Allow unicode type strings */
if (PyUnicode_Check(attr)) {
PyObject *tmp = PyUnicode_AsASCIIString(attr);
@@ -2492,7 +2478,7 @@ PyArray_FromInterface(PyObject *origin)
else {
Py_INCREF(attr);
}
-#endif
+
if (!PyBytes_Check(attr)) {
PyErr_SetString(PyExc_TypeError,
"__array_interface__ typestr must be a string");
@@ -2509,7 +2495,10 @@ PyArray_FromInterface(PyObject *origin)
* the 'descr' attribute.
*/
if (dtype->type_num == NPY_VOID) {
- PyObject *descr = PyDict_GetItemString(iface, "descr");
+ PyObject *descr = _PyDict_GetItemStringWithError(iface, "descr");
+ if (descr == NULL && PyErr_Occurred()) {
+ goto fail;
+ }
PyArray_Descr *new_dtype = NULL;
if (descr != NULL && !_is_default_descr(descr, attr) &&
@@ -2520,15 +2509,20 @@ PyArray_FromInterface(PyObject *origin)
}
}
-#if defined(NPY_PY3K)
Py_DECREF(attr); /* Pairs with the unicode handling above */
-#endif
/* Get shape tuple from interface specification */
- attr = PyDict_GetItemString(iface, "shape");
+ attr = _PyDict_GetItemStringWithError(iface, "shape");
if (attr == NULL) {
+ if (PyErr_Occurred()) {
+ return NULL;
+ }
/* Shape must be specified when 'data' is specified */
- if (PyDict_GetItemString(iface, "data") != NULL) {
+ PyObject *data = _PyDict_GetItemStringWithError(iface, "data");
+ if (data == NULL && PyErr_Occurred()) {
+ return NULL;
+ }
+ else if (data != NULL) {
Py_DECREF(iface);
PyErr_SetString(PyExc_ValueError,
"Missing __array_interface__ shape");
@@ -2559,7 +2553,10 @@ PyArray_FromInterface(PyObject *origin)
}
/* Get data buffer from interface specification */
- attr = PyDict_GetItemString(iface, "data");
+ attr = _PyDict_GetItemStringWithError(iface, "data");
+ if (attr == NULL && PyErr_Occurred()){
+ return NULL;
+ }
/* Case for data access through pointer */
if (attr && PyTuple_Check(attr)) {
@@ -2603,7 +2600,6 @@ PyArray_FromInterface(PyObject *origin)
else {
base = origin;
}
-#if defined(NPY_PY3K)
if (PyObject_GetBuffer(base, &view,
PyBUF_WRITABLE|PyBUF_SIMPLE) < 0) {
PyErr_Clear();
@@ -2622,21 +2618,13 @@ PyArray_FromInterface(PyObject *origin)
*/
PyBuffer_Release(&view);
_dealloc_cached_buffer_info(base);
-#else
- res = PyObject_AsWriteBuffer(base, (void **)&data, &buffer_len);
- if (res < 0) {
- PyErr_Clear();
- res = PyObject_AsReadBuffer(
- base, (const void **)&data, &buffer_len);
- if (res < 0) {
- goto fail;
- }
- dataflags &= ~NPY_ARRAY_WRITEABLE;
- }
-#endif
+
/* Get offset number from interface specification */
- attr = PyDict_GetItemString(iface, "offset");
- if (attr) {
+ attr = _PyDict_GetItemStringWithError(iface, "offset");
+ if (attr == NULL && PyErr_Occurred()) {
+ goto fail;
+ }
+ else if (attr) {
npy_longlong num = PyLong_AsLongLong(attr);
if (error_converting(num)) {
PyErr_SetString(PyExc_TypeError,
@@ -2671,7 +2659,10 @@ PyArray_FromInterface(PyObject *origin)
goto fail;
}
}
- attr = PyDict_GetItemString(iface, "strides");
+ attr = _PyDict_GetItemStringWithError(iface, "strides");
+ if (attr == NULL && PyErr_Occurred()){
+ return NULL;
+ }
if (attr != NULL && attr != Py_None) {
if (!PyTuple_Check(attr)) {
PyErr_SetString(PyExc_TypeError,
@@ -2707,40 +2698,30 @@ PyArray_FromInterface(PyObject *origin)
return NULL;
}
-/*NUMPY_API*/
+/*NUMPY_API
+ */
NPY_NO_EXPORT PyObject *
PyArray_FromArrayAttr(PyObject *op, PyArray_Descr *typecode, PyObject *context)
{
PyObject *new;
PyObject *array_meth;
+ if (context != NULL) {
+ PyErr_SetString(PyExc_RuntimeError, "'context' must be NULL");
+ return NULL;
+ }
array_meth = PyArray_LookupSpecial_OnInstance(op, "__array__");
if (array_meth == NULL) {
+ if (PyErr_Occurred()) {
+ PyErr_Clear(); /* TODO[gh-14801]: propagate crashes during attribute access? */
+ }
return Py_NotImplemented;
}
- if (context == NULL) {
- if (typecode == NULL) {
- new = PyObject_CallFunction(array_meth, NULL);
- }
- else {
- new = PyObject_CallFunction(array_meth, "O", typecode);
- }
+ if (typecode == NULL) {
+ new = PyObject_CallFunction(array_meth, NULL);
}
else {
- if (typecode == NULL) {
- new = PyObject_CallFunction(array_meth, "OO", Py_None, context);
- if (new == NULL && PyErr_ExceptionMatches(PyExc_TypeError)) {
- PyErr_Clear();
- new = PyObject_CallFunction(array_meth, "");
- }
- }
- else {
- new = PyObject_CallFunction(array_meth, "OO", typecode, context);
- if (new == NULL && PyErr_ExceptionMatches(PyExc_TypeError)) {
- PyErr_Clear();
- new = PyObject_CallFunction(array_meth, "O", typecode);
- }
- }
+ new = PyObject_CallFunction(array_meth, "O", typecode);
}
Py_DECREF(array_meth);
if (new == NULL) {
@@ -2894,8 +2875,8 @@ PyArray_CopyAsFlat(PyArrayObject *dst, PyArrayObject *src, NPY_ORDER order)
src_size = PyArray_SIZE(src);
if (dst_size != src_size) {
PyErr_Format(PyExc_ValueError,
- "cannot copy from array of size %d into an array "
- "of size %d", (int)src_size, (int)dst_size);
+ "cannot copy from array of size %" NPY_INTP_FMT " into an array "
+ "of size %" NPY_INTP_FMT, src_size, dst_size);
return -1;
}
@@ -3633,7 +3614,7 @@ array_fromfile_binary(FILE *fp, PyArray_Descr *dtype, npy_intp num, size_t *nrea
*/
#define FROM_BUFFER_SIZE 4096
static PyArrayObject *
-array_from_text(PyArray_Descr *dtype, npy_intp num, char *sep, size_t *nread,
+array_from_text(PyArray_Descr *dtype, npy_intp num, char const *sep, size_t *nread,
void *stream, next_element next, skip_separator skip_sep,
void *stream_data)
{
@@ -3823,9 +3804,7 @@ PyArray_FromBuffer(PyObject *buf, PyArray_Descr *type,
{
PyArrayObject *ret;
char *data;
-#if defined(NPY_PY3K)
Py_buffer view;
-#endif
Py_ssize_t ts;
npy_intp s, n;
int itemsize;
@@ -3846,7 +3825,6 @@ PyArray_FromBuffer(PyObject *buf, PyArray_Descr *type,
return NULL;
}
-#if defined(NPY_PY3K)
if (PyObject_GetBuffer(buf, &view, PyBUF_WRITABLE|PyBUF_SIMPLE) < 0) {
writeable = 0;
PyErr_Clear();
@@ -3865,16 +3843,6 @@ PyArray_FromBuffer(PyObject *buf, PyArray_Descr *type,
*/
PyBuffer_Release(&view);
_dealloc_cached_buffer_info(buf);
-#else
- if (PyObject_AsWriteBuffer(buf, (void *)&data, &ts) == -1) {
- writeable = 0;
- PyErr_Clear();
- if (PyObject_AsReadBuffer(buf, (void *)&data, &ts) == -1) {
- Py_DECREF(type);
- return NULL;
- }
- }
-#endif
if ((offset < 0) || (offset > ts)) {
PyErr_Format(PyExc_ValueError,
diff --git a/numpy/core/src/multiarray/ctors.h b/numpy/core/src/multiarray/ctors.h
index 4768e4efd..9e63cd7d2 100644
--- a/numpy/core/src/multiarray/ctors.h
+++ b/numpy/core/src/multiarray/ctors.h
@@ -30,6 +30,14 @@ PyArray_New(
PyTypeObject *, int nd, npy_intp const *,
int, npy_intp const*, void *, int, int, PyObject *);
+NPY_NO_EXPORT int
+PyArray_GetArrayParamsFromObject_int(PyObject *op,
+ PyArray_Descr *requested_dtype,
+ npy_bool writeable,
+ PyArray_Descr **out_dtype,
+ int *out_ndim, npy_intp *out_dims,
+ PyArrayObject **out_arr);
+
NPY_NO_EXPORT PyObject *
PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth,
int max_depth, int flags, PyObject *context);
diff --git a/numpy/core/src/multiarray/datetime.c b/numpy/core/src/multiarray/datetime.c
index d21bb9776..67ed3ca85 100644
--- a/numpy/core/src/multiarray/datetime.c
+++ b/numpy/core/src/multiarray/datetime.c
@@ -71,7 +71,7 @@ numpy_pydatetime_import(void)
}
/* Exported as DATETIMEUNITS in multiarraymodule.c */
-NPY_NO_EXPORT char *_datetime_strings[NPY_DATETIME_NUMUNITS] = {
+NPY_NO_EXPORT char const *_datetime_strings[NPY_DATETIME_NUMUNITS] = {
"Y",
"M",
"W",
@@ -692,6 +692,14 @@ get_datetime_metadata_from_dtype(PyArray_Descr *dtype)
return &(((PyArray_DatetimeDTypeMetaData *)dtype->c_metadata)->meta);
}
+/* strtol does not know whether to put a const qualifier on endptr, wrap
+ * it so we can put this cast in one place.
+ */
+NPY_NO_EXPORT long int
+strtol_const(char const *str, char const **endptr, int base) {
+ return strtol(str, (char**)endptr, base);
+}
+
/*
* Converts a substring given by 'str' and 'len' into
* a date time unit multiplier + enum value, which are populated
@@ -702,15 +710,15 @@ get_datetime_metadata_from_dtype(PyArray_Descr *dtype)
* Returns 0 on success, -1 on failure.
*/
NPY_NO_EXPORT int
-parse_datetime_extended_unit_from_string(char *str, Py_ssize_t len,
- char *metastr,
+parse_datetime_extended_unit_from_string(char const *str, Py_ssize_t len,
+ char const *metastr,
PyArray_DatetimeMetaData *out_meta)
{
- char *substr = str, *substrend = NULL;
+ char const *substr = str, *substrend = NULL;
int den = 1;
/* First comes an optional integer multiplier */
- out_meta->num = (int)strtol(substr, &substrend, 10);
+ out_meta->num = (int)strtol_const(substr, &substrend, 10);
if (substr == substrend) {
out_meta->num = 1;
}
@@ -735,7 +743,7 @@ parse_datetime_extended_unit_from_string(char *str, Py_ssize_t len,
/* Next comes an optional integer denominator */
if (substr-str < len && *substr == '/') {
substr++;
- den = (int)strtol(substr, &substrend, 10);
+ den = (int)strtol_const(substr, &substrend, 10);
/* If the '/' exists, there must be a number followed by ']' */
if (substr == substrend || *substrend != ']') {
goto bad_input;
@@ -758,8 +766,8 @@ parse_datetime_extended_unit_from_string(char *str, Py_ssize_t len,
bad_input:
if (metastr != NULL) {
PyErr_Format(PyExc_TypeError,
- "Invalid datetime metadata string \"%s\" at position %d",
- metastr, (int)(substr-metastr));
+ "Invalid datetime metadata string \"%s\" at position %zd",
+ metastr, substr-metastr);
}
else {
PyErr_Format(PyExc_TypeError,
@@ -776,10 +784,10 @@ bad_input:
* Returns 0 on success, -1 on failure.
*/
NPY_NO_EXPORT int
-parse_datetime_metadata_from_metastr(char *metastr, Py_ssize_t len,
+parse_datetime_metadata_from_metastr(char const *metastr, Py_ssize_t len,
PyArray_DatetimeMetaData *out_meta)
{
- char *substr = metastr, *substrend = NULL;
+ char const *substr = metastr, *substrend = NULL;
/* Treat the empty string as generic units */
if (len == 0) {
@@ -820,8 +828,8 @@ parse_datetime_metadata_from_metastr(char *metastr, Py_ssize_t len,
bad_input:
if (substr != metastr) {
PyErr_Format(PyExc_TypeError,
- "Invalid datetime metadata string \"%s\" at position %d",
- metastr, (int)(substr-metastr));
+ "Invalid datetime metadata string \"%s\" at position %zd",
+ metastr, substr - metastr);
}
else {
PyErr_Format(PyExc_TypeError,
@@ -837,10 +845,10 @@ bad_input:
* The "type" string should be NULL-terminated.
*/
NPY_NO_EXPORT PyArray_Descr *
-parse_dtype_from_datetime_typestr(char *typestr, Py_ssize_t len)
+parse_dtype_from_datetime_typestr(char const *typestr, Py_ssize_t len)
{
PyArray_DatetimeMetaData meta;
- char *metastr = NULL;
+ char const *metastr = NULL;
int is_timedelta = 0;
Py_ssize_t metalen = 0;
@@ -923,7 +931,7 @@ static NPY_DATETIMEUNIT _multiples_table[16][4] = {
*/
NPY_NO_EXPORT int
convert_datetime_divisor_to_multiple(PyArray_DatetimeMetaData *meta,
- int den, char *metastr)
+ int den, char const *metastr)
{
int i, num, ind;
NPY_DATETIMEUNIT *totry;
@@ -1671,7 +1679,7 @@ datetime_type_promotion(PyArray_Descr *type1, PyArray_Descr *type2)
* Returns NPY_DATETIMEUNIT on success, NPY_FR_ERROR on failure.
*/
NPY_NO_EXPORT NPY_DATETIMEUNIT
-parse_datetime_unit_from_string(char *str, Py_ssize_t len, char *metastr)
+parse_datetime_unit_from_string(char const *str, Py_ssize_t len, char const *metastr)
{
/* Use switch statements so the compiler can make it fast */
if (len == 1) {
@@ -1956,7 +1964,7 @@ append_metastr_to_string(PyArray_DatetimeMetaData *meta,
{
PyObject *res;
int num;
- char *basestr;
+ char const *basestr;
if (ret == NULL) {
return NULL;
@@ -2273,15 +2281,15 @@ convert_pydatetime_to_datetimestruct(PyObject *obj, npy_datetimestruct *out,
invalid_date:
PyErr_Format(PyExc_ValueError,
- "Invalid date (%d,%d,%d) when converting to NumPy datetime",
- (int)out->year, (int)out->month, (int)out->day);
+ "Invalid date (%" NPY_INT64_FMT ",%" NPY_INT32_FMT ",%" NPY_INT32_FMT ") when converting to NumPy datetime",
+ out->year, out->month, out->day);
return -1;
invalid_time:
PyErr_Format(PyExc_ValueError,
- "Invalid time (%d,%d,%d,%d) when converting "
+ "Invalid time (%" NPY_INT32_FMT ",%" NPY_INT32_FMT ",%" NPY_INT32_FMT ",%" NPY_INT32_FMT ") when converting "
"to NumPy datetime",
- (int)out->hour, (int)out->min, (int)out->sec, (int)out->us);
+ out->hour, out->min, out->sec, out->us);
return -1;
}
@@ -3221,18 +3229,6 @@ NPY_NO_EXPORT PyArrayObject *
datetime_arange(PyObject *start, PyObject *stop, PyObject *step,
PyArray_Descr *dtype)
{
- PyArray_DatetimeMetaData meta;
- /*
- * Both datetime and timedelta are stored as int64, so they can
- * share value variables.
- */
- npy_int64 values[3];
- PyObject *objs[3];
- int type_nums[3];
-
- npy_intp i, length;
- PyArrayObject *ret;
- npy_int64 *ret_data;
/*
* First normalize the input parameters so there is no Py_None,
@@ -3265,6 +3261,8 @@ datetime_arange(PyObject *start, PyObject *stop, PyObject *step,
/* Check if the units of the given dtype are generic, in which
* case we use the code path that detects the units
*/
+ int type_nums[3];
+ PyArray_DatetimeMetaData meta;
if (dtype != NULL) {
PyArray_DatetimeMetaData *meta_tmp;
@@ -3313,6 +3311,7 @@ datetime_arange(PyObject *start, PyObject *stop, PyObject *step,
}
/* Set up to convert the objects to a common datetime unit metadata */
+ PyObject *objs[3];
objs[0] = start;
objs[1] = stop;
objs[2] = step;
@@ -3333,11 +3332,22 @@ datetime_arange(PyObject *start, PyObject *stop, PyObject *step,
type_nums[2] = NPY_TIMEDELTA;
}
- /* Convert all the arguments */
+ /* Convert all the arguments
+ *
+ * Both datetime and timedelta are stored as int64, so they can
+ * share value variables.
+ */
+ npy_int64 values[3];
if (convert_pyobjects_to_datetimes(3, objs, type_nums,
NPY_SAME_KIND_CASTING, values, &meta) < 0) {
return NULL;
}
+ /* If no start was provided, default to 0 */
+ if (start == NULL) {
+ /* enforced above */
+ assert(type_nums[0] == NPY_TIMEDELTA);
+ values[0] = 0;
+ }
/* If no step was provided, default to 1 */
if (step == NULL) {
@@ -3362,6 +3372,7 @@ datetime_arange(PyObject *start, PyObject *stop, PyObject *step,
}
/* Calculate the array length */
+ npy_intp length;
if (values[2] > 0 && values[1] > values[0]) {
length = (values[1] - values[0] + (values[2] - 1)) / values[2];
}
@@ -3389,19 +3400,20 @@ datetime_arange(PyObject *start, PyObject *stop, PyObject *step,
}
/* Create the result array */
- ret = (PyArrayObject *)PyArray_NewFromDescr(
- &PyArray_Type, dtype, 1, &length, NULL,
- NULL, 0, NULL);
+ PyArrayObject *ret = (PyArrayObject *)PyArray_NewFromDescr(
+ &PyArray_Type, dtype, 1, &length, NULL,
+ NULL, 0, NULL);
+
if (ret == NULL) {
return NULL;
}
if (length > 0) {
/* Extract the data pointer */
- ret_data = (npy_int64 *)PyArray_DATA(ret);
+ npy_int64 *ret_data = (npy_int64 *)PyArray_DATA(ret);
/* Create the timedeltas or datetimes */
- for (i = 0; i < length; ++i) {
+ for (npy_intp i = 0; i < length; ++i) {
*ret_data = values[0];
values[0] += values[2];
ret_data++;
diff --git a/numpy/core/src/multiarray/datetime_busday.c b/numpy/core/src/multiarray/datetime_busday.c
index cdeb65d0e..d3cce8a37 100644
--- a/numpy/core/src/multiarray/datetime_busday.c
+++ b/numpy/core/src/multiarray/datetime_busday.c
@@ -1012,7 +1012,7 @@ array_busday_offset(PyObject *NPY_UNUSED(self),
/* This steals the datetime_dtype reference */
dates = (PyArrayObject *)PyArray_FromAny(dates_in, datetime_dtype,
- 0, 0, 0, dates_in);
+ 0, 0, 0, NULL);
if (dates == NULL) {
goto fail;
}
@@ -1021,7 +1021,7 @@ array_busday_offset(PyObject *NPY_UNUSED(self),
/* Make 'offsets' into an array */
offsets = (PyArrayObject *)PyArray_FromAny(offsets_in,
PyArray_DescrFromType(NPY_INT64),
- 0, 0, 0, offsets_in);
+ 0, 0, 0, NULL);
if (offsets == NULL) {
goto fail;
}
@@ -1142,7 +1142,7 @@ array_busday_count(PyObject *NPY_UNUSED(self),
/* This steals the datetime_dtype reference */
dates_begin = (PyArrayObject *)PyArray_FromAny(dates_begin_in,
datetime_dtype,
- 0, 0, 0, dates_begin_in);
+ 0, 0, 0, NULL);
if (dates_begin == NULL) {
goto fail;
}
@@ -1165,7 +1165,7 @@ array_busday_count(PyObject *NPY_UNUSED(self),
/* This steals the datetime_dtype reference */
dates_end = (PyArrayObject *)PyArray_FromAny(dates_end_in,
datetime_dtype,
- 0, 0, 0, dates_end_in);
+ 0, 0, 0, NULL);
if (dates_end == NULL) {
goto fail;
}
@@ -1286,7 +1286,7 @@ array_is_busday(PyObject *NPY_UNUSED(self),
/* This steals the datetime_dtype reference */
dates = (PyArrayObject *)PyArray_FromAny(dates_in,
datetime_dtype,
- 0, 0, 0, dates_in);
+ 0, 0, 0, NULL);
if (dates == NULL) {
goto fail;
}
diff --git a/numpy/core/src/multiarray/datetime_busdaycal.c b/numpy/core/src/multiarray/datetime_busdaycal.c
index 7a26868e8..6936a803f 100644
--- a/numpy/core/src/multiarray/datetime_busdaycal.c
+++ b/numpy/core/src/multiarray/datetime_busdaycal.c
@@ -293,7 +293,7 @@ PyArray_HolidaysConverter(PyObject *dates_in, npy_holidayslist *holidays)
/* This steals the datetime_dtype reference */
dates = (PyArrayObject *)PyArray_FromAny(dates_in, datetime_dtype,
- 0, 0, 0, dates_in);
+ 0, 0, 0, NULL);
if (dates == NULL) {
goto fail;
}
@@ -493,61 +493,12 @@ static PyGetSetDef busdaycalendar_getsets[] = {
};
NPY_NO_EXPORT PyTypeObject NpyBusDayCalendar_Type = {
-#if defined(NPY_PY3K)
PyVarObject_HEAD_INIT(NULL, 0)
-#else
- PyObject_HEAD_INIT(NULL)
- 0, /* ob_size */
-#endif
- "numpy.busdaycalendar", /* tp_name */
- sizeof(NpyBusDayCalendar), /* tp_basicsize */
- 0, /* tp_itemsize */
- /* methods */
- (destructor)busdaycalendar_dealloc, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
-#if defined(NPY_PY3K)
- 0, /* tp_reserved */
-#else
- 0, /* tp_compare */
-#endif
- 0, /* tp_repr */
- 0, /* tp_as_number */
- 0, /* tp_as_sequence */
- 0, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- 0, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
- Py_TPFLAGS_DEFAULT, /* tp_flags */
- 0, /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
- 0, /* tp_methods */
- 0, /* tp_members */
- busdaycalendar_getsets, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- (initproc)busdaycalendar_init, /* tp_init */
- 0, /* tp_alloc */
- busdaycalendar_new, /* tp_new */
- 0, /* tp_free */
- 0, /* tp_is_gc */
- 0, /* tp_bases */
- 0, /* tp_mro */
- 0, /* tp_cache */
- 0, /* tp_subclasses */
- 0, /* tp_weaklist */
- 0, /* tp_del */
- 0, /* tp_version_tag */
+ .tp_name = "numpy.busdaycalendar",
+ .tp_basicsize = sizeof(NpyBusDayCalendar),
+ .tp_dealloc = (destructor)busdaycalendar_dealloc,
+ .tp_flags = Py_TPFLAGS_DEFAULT,
+ .tp_getset = busdaycalendar_getsets,
+ .tp_init = (initproc)busdaycalendar_init,
+ .tp_new = busdaycalendar_new,
};
diff --git a/numpy/core/src/multiarray/datetime_strings.c b/numpy/core/src/multiarray/datetime_strings.c
index 95b7bb3dc..4574c05d8 100644
--- a/numpy/core/src/multiarray/datetime_strings.c
+++ b/numpy/core/src/multiarray/datetime_strings.c
@@ -218,7 +218,7 @@ convert_datetimestruct_utc_to_local(npy_datetimestruct *out_dts_local,
* Returns 0 on success, -1 on failure.
*/
NPY_NO_EXPORT int
-parse_iso_8601_datetime(char *str, Py_ssize_t len,
+parse_iso_8601_datetime(char const *str, Py_ssize_t len,
NPY_DATETIMEUNIT unit,
NPY_CASTING casting,
npy_datetimestruct *out,
@@ -227,7 +227,7 @@ parse_iso_8601_datetime(char *str, Py_ssize_t len,
{
int year_leap = 0;
int i, numdigits;
- char *substr;
+ char const *substr;
Py_ssize_t sublen;
NPY_DATETIMEUNIT bestunit;
@@ -743,8 +743,8 @@ finish:
parse_error:
PyErr_Format(PyExc_ValueError,
- "Error parsing datetime string \"%s\" at position %d",
- str, (int)(substr-str));
+ "Error parsing datetime string \"%s\" at position %zd",
+ str, substr - str);
return -1;
error:
@@ -1487,7 +1487,6 @@ array_datetime_as_string(PyObject *NPY_UNUSED(self), PyObject *args,
/* Get a string size long enough for any datetimes we're given */
strsize = get_datetime_iso_8601_strlen(local, unit);
-#if defined(NPY_PY3K)
/*
* For Python3, allocate the output array as a UNICODE array, so
* that it will behave as strings properly
@@ -1504,7 +1503,6 @@ array_datetime_as_string(PyObject *NPY_UNUSED(self), PyObject *args,
op_dtypes[1] = NULL;
goto fail;
}
-#endif
/* Create the iteration string data type (always ASCII string) */
op_dtypes[1] = PyArray_DescrNewFromType(NPY_STRING);
if (op_dtypes[1] == NULL) {
diff --git a/numpy/core/src/multiarray/datetime_strings.h b/numpy/core/src/multiarray/datetime_strings.h
index 4e60ce929..148369595 100644
--- a/numpy/core/src/multiarray/datetime_strings.h
+++ b/numpy/core/src/multiarray/datetime_strings.h
@@ -33,7 +33,7 @@
* Returns 0 on success, -1 on failure.
*/
NPY_NO_EXPORT int
-parse_iso_8601_datetime(char *str, Py_ssize_t len,
+parse_iso_8601_datetime(char const *str, Py_ssize_t len,
NPY_DATETIMEUNIT unit,
NPY_CASTING casting,
npy_datetimestruct *out,
diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c
index 23d140cf6..b26a26abf 100644
--- a/numpy/core/src/multiarray/descriptor.c
+++ b/numpy/core/src/multiarray/descriptor.c
@@ -19,7 +19,7 @@
#include "descriptor.h"
#include "alloc.h"
#include "assert.h"
-#include "buffer.h"
+#include "npy_buffer.h"
/*
* offset: A starting offset.
@@ -40,14 +40,27 @@
static PyObject *typeDict = NULL; /* Must be explicitly loaded */
static PyArray_Descr *
-_use_inherit(PyArray_Descr *type, PyObject *newobj, int *errflag);
+_try_convert_from_inherit_tuple(PyArray_Descr *type, PyObject *newobj);
static PyArray_Descr *
-_arraydescr_from_ctypes_type(PyTypeObject *type)
+_convert_from_any(PyObject *obj, int align);
+
+/*
+ * This function creates a dtype object when the object is a ctypes subclass.
+ *
+ * Returns `Py_NotImplemented` if the type is not a ctypes subclass.
+ */
+static PyArray_Descr *
+_try_convert_from_ctypes_type(PyTypeObject *type)
{
PyObject *_numpy_dtype_ctypes;
PyObject *res;
+ if (!npy_ctypes_check(type)) {
+ Py_INCREF(Py_NotImplemented);
+ return (PyArray_Descr *)Py_NotImplemented;
+ }
+
/* Call the python function of the same name. */
_numpy_dtype_ctypes = PyImport_ImportModule("numpy.core._dtype_ctypes");
if (_numpy_dtype_ctypes == NULL) {
@@ -72,25 +85,21 @@ _arraydescr_from_ctypes_type(PyTypeObject *type)
return (PyArray_Descr *)res;
}
+static PyArray_Descr *
+_convert_from_any(PyObject *obj, int align);
+
/*
* This function creates a dtype object when the object has a "dtype" attribute,
* and it can be converted to a dtype object.
*
- * Returns a new reference to a dtype object, or NULL
- * if this is not possible.
- * When the return value is true, the dtype attribute should have been used
- * and parsed. Currently the only failure mode for a 1 return is a
- * RecursionError and the descriptor is set to NULL.
- * When the return value is false, no error will be set.
+ * Returns `Py_NotImplemented` if this is not possible.
+ * Currently the only failure mode for a NULL return is a RecursionError.
*/
-int
-_arraydescr_from_dtype_attr(PyObject *obj, PyArray_Descr **newdescr)
+static PyArray_Descr *
+_try_convert_from_dtype_attr(PyObject *obj)
{
- PyObject *dtypedescr;
- int ret;
-
/* For arbitrary objects that have a "dtype" attribute */
- dtypedescr = PyObject_GetAttrString(obj, "dtype");
+ PyObject *dtypedescr = PyObject_GetAttrString(obj, "dtype");
if (dtypedescr == NULL) {
/*
* This can be reached due to recursion limit being hit while fetching
@@ -103,26 +112,33 @@ _arraydescr_from_dtype_attr(PyObject *obj, PyArray_Descr **newdescr)
" while trying to convert the given data type from its "
"`.dtype` attribute.") != 0) {
Py_DECREF(dtypedescr);
- return 1;
+ return NULL;
}
- ret = PyArray_DescrConverter(dtypedescr, newdescr);
-
+ PyArray_Descr *newdescr = _convert_from_any(dtypedescr, 0);
Py_DECREF(dtypedescr);
Py_LeaveRecursiveCall();
- if (ret != NPY_SUCCEED) {
+ if (newdescr == NULL) {
goto fail;
}
- return 1;
+ return newdescr;
fail:
/* Ignore all but recursion errors, to give ctypes a full try. */
if (!PyErr_ExceptionMatches(PyExc_RecursionError)) {
PyErr_Clear();
- return 0;
+ Py_INCREF(Py_NotImplemented);
+ return (PyArray_Descr *)Py_NotImplemented;
}
- return 1;
+ return NULL;
+}
+
+/* Expose to another file with a prefixed name */
+NPY_NO_EXPORT PyArray_Descr *
+_arraydescr_try_convert_from_dtype_attr(PyObject *obj)
+{
+ return _try_convert_from_dtype_attr(obj);
}
/*
@@ -199,7 +215,7 @@ _check_for_commastring(const char *type, Py_ssize_t len)
#undef _chk_byteorder
static int
-is_datetime_typestr(char *type, Py_ssize_t len)
+is_datetime_typestr(char const *type, Py_ssize_t len)
{
if (len < 2) {
return 0;
@@ -225,34 +241,26 @@ is_datetime_typestr(char *type, Py_ssize_t len)
static PyArray_Descr *
_convert_from_tuple(PyObject *obj, int align)
{
- PyArray_Descr *type, *res;
- PyObject *val;
- int errflag;
-
if (PyTuple_GET_SIZE(obj) != 2) {
+ PyErr_Format(PyExc_TypeError,
+ "Tuple must have size 2, but has size %zd",
+ PyTuple_GET_SIZE(obj));
return NULL;
}
- if (align) {
- if (!PyArray_DescrAlignConverter(PyTuple_GET_ITEM(obj, 0), &type)) {
- return NULL;
- }
+ PyArray_Descr *type = _convert_from_any(PyTuple_GET_ITEM(obj, 0), align);
+ if (type == NULL) {
+ return NULL;
}
- else {
- if (!PyArray_DescrConverter(PyTuple_GET_ITEM(obj, 0), &type)) {
- return NULL;
- }
- }
- val = PyTuple_GET_ITEM(obj,1);
+ PyObject *val = PyTuple_GET_ITEM(obj,1);
/* try to interpret next item as a type */
- res = _use_inherit(type, val, &errflag);
- if (res || errflag) {
+ PyArray_Descr *res = _try_convert_from_inherit_tuple(type, val);
+ if ((PyObject *)res != Py_NotImplemented) {
Py_DECREF(type);
return res;
}
- PyErr_Clear();
+ Py_DECREF(res);
/*
- * We get here if res was NULL but errflag wasn't set
- * --- i.e. the conversion to a data-descr failed in _use_inherit
+ * We get here if _try_convert_from_inherit_tuple failed without crashing
*/
if (PyDataType_ISUNSIZED(type)) {
/* interpret next item as a typesize */
@@ -291,11 +299,6 @@ _convert_from_tuple(PyObject *obj, int align)
* a new fields attribute.
*/
PyArray_Dims shape = {NULL, -1};
- PyArray_Descr *newdescr = NULL;
- npy_intp items;
- int i, overflowed;
- int nbytes;
-
if (!(PyArray_IntpConverter(val, &shape)) || (shape.len > NPY_MAXDIMS)) {
PyErr_SetString(PyExc_ValueError,
"invalid shape in fixed-type tuple.");
@@ -322,7 +325,7 @@ _convert_from_tuple(PyObject *obj, int align)
}
/* validate and set shape */
- for (i=0; i < shape.len; i++) {
+ for (int i=0; i < shape.len; i++) {
if (shape.ptr[i] < 0) {
PyErr_SetString(PyExc_ValueError,
"invalid shape in fixed-type tuple: "
@@ -336,7 +339,9 @@ _convert_from_tuple(PyObject *obj, int align)
goto fail;
}
}
- items = PyArray_OverflowMultiplyList(shape.ptr, shape.len);
+ npy_intp items = PyArray_OverflowMultiplyList(shape.ptr, shape.len);
+ int overflowed;
+ int nbytes;
if (items < 0 || items > NPY_MAX_INT) {
overflowed = 1;
}
@@ -350,13 +355,14 @@ _convert_from_tuple(PyObject *obj, int align)
"bytes must fit into a C int.");
goto fail;
}
- newdescr = PyArray_DescrNewFromType(NPY_VOID);
+ PyArray_Descr *newdescr = PyArray_DescrNewFromType(NPY_VOID);
if (newdescr == NULL) {
goto fail;
}
newdescr->elsize = nbytes;
newdescr->subarray = PyArray_malloc(sizeof(PyArray_ArrayDescr));
if (newdescr->subarray == NULL) {
+ Py_DECREF(newdescr);
PyErr_NoMemory();
goto fail;
}
@@ -375,13 +381,15 @@ _convert_from_tuple(PyObject *obj, int align)
*/
newdescr->subarray->shape = PyTuple_New(shape.len);
if (newdescr->subarray->shape == NULL) {
+ Py_DECREF(newdescr);
goto fail;
}
- for (i=0; i < shape.len; i++) {
+ for (int i=0; i < shape.len; i++) {
PyTuple_SET_ITEM(newdescr->subarray->shape, i,
PyInt_FromLong((long)shape.ptr[i]));
if (PyTuple_GET_ITEM(newdescr->subarray->shape, i) == NULL) {
+ Py_DECREF(newdescr);
goto fail;
}
}
@@ -391,7 +399,6 @@ _convert_from_tuple(PyObject *obj, int align)
fail:
Py_XDECREF(type);
- Py_XDECREF(newdescr);
npy_free_cache_dim_obj(shape);
return NULL;
}
@@ -410,138 +417,125 @@ _convert_from_tuple(PyObject *obj, int align)
static PyArray_Descr *
_convert_from_array_descr(PyObject *obj, int align)
{
- int n, i, totalsize;
- int ret;
- PyObject *fields, *item, *newobj;
- PyObject *name, *tup, *title;
- PyObject *nameslist;
- PyArray_Descr *new;
- PyArray_Descr *conv;
+ int n = PyList_GET_SIZE(obj);
+ PyObject *nameslist = PyTuple_New(n);
+ if (!nameslist) {
+ return NULL;
+ }
+
/* Types with fields need the Python C API for field access */
char dtypeflags = NPY_NEEDS_PYAPI;
int maxalign = 0;
-
- n = PyList_GET_SIZE(obj);
- nameslist = PyTuple_New(n);
- if (!nameslist) {
+ int totalsize = 0;
+ PyObject *fields = PyDict_New();
+ if (!fields) {
return NULL;
}
- totalsize = 0;
- fields = PyDict_New();
- for (i = 0; i < n; i++) {
- item = PyList_GET_ITEM(obj, i);
+ for (int i = 0; i < n; i++) {
+ PyObject *item = PyList_GET_ITEM(obj, i);
if (!PyTuple_Check(item) || (PyTuple_GET_SIZE(item) < 2)) {
+ PyErr_Format(PyExc_TypeError,
+ "Field elements must be 2- or 3-tuples, got '%R'",
+ item);
goto fail;
}
- name = PyTuple_GET_ITEM(item, 0);
+ PyObject *name = PyTuple_GET_ITEM(item, 0);
+ PyObject *title;
if (PyBaseString_Check(name)) {
title = NULL;
}
else if (PyTuple_Check(name)) {
if (PyTuple_GET_SIZE(name) != 2) {
+ PyErr_Format(PyExc_TypeError,
+ "If a tuple, the first element of a field tuple must have "
+ "two elements, not %zd",
+ PyTuple_GET_SIZE(name));
goto fail;
}
title = PyTuple_GET_ITEM(name, 0);
name = PyTuple_GET_ITEM(name, 1);
if (!PyBaseString_Check(name)) {
+ PyErr_SetString(PyExc_TypeError, "Field name must be a str");
goto fail;
}
}
else {
+ PyErr_SetString(PyExc_TypeError,
+ "First element of field tuple is "
+ "neither a tuple nor str");
goto fail;
}
/* Insert name into nameslist */
Py_INCREF(name);
-#if !defined(NPY_PY3K)
- /* convert unicode name to ascii on Python 2 if possible */
- if (PyUnicode_Check(name)) {
- PyObject *tmp = PyUnicode_AsASCIIString(name);
- Py_DECREF(name);
- if (tmp == NULL) {
- goto fail;
- }
- name = tmp;
- }
-#endif
if (PyUString_GET_SIZE(name) == 0) {
Py_DECREF(name);
if (title == NULL) {
name = PyUString_FromFormat("f%d", i);
+ if (name == NULL) {
+ goto fail;
+ }
}
-#if defined(NPY_PY3K)
/* On Py3, allow only non-empty Unicode strings as field names */
else if (PyUString_Check(title) && PyUString_GET_SIZE(title) > 0) {
name = title;
Py_INCREF(name);
}
else {
+ PyErr_SetString(PyExc_TypeError, "Field titles must be non-empty strings");
goto fail;
}
-#else
- else {
- name = title;
- Py_INCREF(name);
- }
-#endif
}
PyTuple_SET_ITEM(nameslist, i, name);
/* Process rest */
-
+ PyArray_Descr *conv;
if (PyTuple_GET_SIZE(item) == 2) {
- if (align) {
- ret = PyArray_DescrAlignConverter(PyTuple_GET_ITEM(item, 1),
- &conv);
- }
- else {
- ret = PyArray_DescrConverter(PyTuple_GET_ITEM(item, 1), &conv);
+ conv = _convert_from_any(PyTuple_GET_ITEM(item, 1), align);
+ if (conv == NULL) {
+ goto fail;
}
}
else if (PyTuple_GET_SIZE(item) == 3) {
- newobj = PyTuple_GetSlice(item, 1, 3);
- if (align) {
- ret = PyArray_DescrAlignConverter(newobj, &conv);
- }
- else {
- ret = PyArray_DescrConverter(newobj, &conv);
- }
+ PyObject *newobj = PyTuple_GetSlice(item, 1, 3);
+ conv = _convert_from_any(newobj, align);
Py_DECREF(newobj);
+ if (conv == NULL) {
+ goto fail;
+ }
}
else {
+ PyErr_Format(PyExc_TypeError,
+ "Field elements must be tuples with at most 3 elements, got '%R'", item);
goto fail;
}
- if (ret == NPY_FAIL) {
- goto fail;
- }
-
- if ((PyDict_GetItem(fields, name) != NULL)
+ if ((PyDict_GetItemWithError(fields, name) != NULL)
|| (title
&& PyBaseString_Check(title)
- && (PyDict_GetItem(fields, title) != NULL))) {
-#if defined(NPY_PY3K)
- name = PyUnicode_AsUTF8String(name);
-#endif
+ && (PyDict_GetItemWithError(fields, title) != NULL))) {
PyErr_Format(PyExc_ValueError,
- "field '%s' occurs more than once", PyString_AsString(name));
-#if defined(NPY_PY3K)
- Py_DECREF(name);
-#endif
+ "field %R occurs more than once", name);
+ Py_DECREF(conv);
+ goto fail;
+ }
+ else if (PyErr_Occurred()) {
+ /* Dict lookup crashed */
Py_DECREF(conv);
goto fail;
}
dtypeflags |= (conv->flags & NPY_FROM_FIELDS);
if (align) {
- int _align;
-
- _align = conv->alignment;
+ int _align = conv->alignment;
if (_align > 1) {
totalsize = NPY_NEXT_ALIGNED_OFFSET(totalsize, _align);
}
maxalign = PyArray_MAX(maxalign, _align);
}
- tup = PyTuple_New((title == NULL ? 2 : 3));
+ PyObject *tup = PyTuple_New((title == NULL ? 2 : 3));
+ if (tup == NULL) {
+ goto fail;
+ }
PyTuple_SET_ITEM(tup, 0, (PyObject *)conv);
PyTuple_SET_ITEM(tup, 1, PyInt_FromLong((long) totalsize));
@@ -553,19 +547,29 @@ _convert_from_array_descr(PyObject *obj, int align)
if (title != NULL) {
Py_INCREF(title);
PyTuple_SET_ITEM(tup, 2, title);
- PyDict_SetItem(fields, name, tup);
+ if (PyDict_SetItem(fields, name, tup) < 0) {
+ goto fail;
+ }
if (PyBaseString_Check(title)) {
- if (PyDict_GetItem(fields, title) != NULL) {
+ PyObject *existing = PyDict_GetItemWithError(fields, title);
+ if (existing == NULL && PyErr_Occurred()) {
+ goto fail;
+ }
+ if (existing != NULL) {
PyErr_SetString(PyExc_ValueError,
"title already used as a name or title.");
Py_DECREF(tup);
goto fail;
}
- PyDict_SetItem(fields, title, tup);
+ if (PyDict_SetItem(fields, title, tup) < 0) {
+ goto fail;
+ }
}
}
else {
- PyDict_SetItem(fields, name, tup);
+ if (PyDict_SetItem(fields, name, tup) < 0) {
+ goto fail;
+ }
}
totalsize += conv->elsize;
@@ -576,7 +580,7 @@ _convert_from_array_descr(PyObject *obj, int align)
totalsize = NPY_NEXT_ALIGNED_OFFSET(totalsize, maxalign);
}
- new = PyArray_DescrNewFromType(NPY_VOID);
+ PyArray_Descr *new = PyArray_DescrNewFromType(NPY_VOID);
if (new == NULL) {
Py_XDECREF(fields);
Py_XDECREF(nameslist);
@@ -609,69 +613,81 @@ _convert_from_array_descr(PyObject *obj, int align)
static PyArray_Descr *
_convert_from_list(PyObject *obj, int align)
{
- int n, i;
- int totalsize;
- PyObject *fields;
- PyArray_Descr *conv = NULL;
- PyArray_Descr *new;
- PyObject *key, *tup;
- PyObject *nameslist = NULL;
- int ret;
- int maxalign = 0;
- /* Types with fields need the Python C API for field access */
- char dtypeflags = NPY_NEEDS_PYAPI;
-
- n = PyList_GET_SIZE(obj);
+ int n = PyList_GET_SIZE(obj);
/*
* Ignore any empty string at end which _internal._commastring
* can produce
*/
- key = PyList_GET_ITEM(obj, n-1);
- if (PyBytes_Check(key) && PyBytes_GET_SIZE(key) == 0) {
- n = n - 1;
+ PyObject *last_item = PyList_GET_ITEM(obj, n-1);
+ if (PyUnicode_Check(last_item)) {
+ Py_ssize_t s = PySequence_Size(last_item);
+ if (s < 0) {
+ return NULL;
+ }
+ if (s == 0) {
+ n = n - 1;
+ }
}
- /* End ignore code.*/
- totalsize = 0;
if (n == 0) {
+ PyErr_SetString(PyExc_ValueError, "Expected at least one field name");
return NULL;
}
- nameslist = PyTuple_New(n);
+ PyObject *nameslist = PyTuple_New(n);
if (!nameslist) {
return NULL;
}
- fields = PyDict_New();
- for (i = 0; i < n; i++) {
- tup = PyTuple_New(2);
- key = PyUString_FromFormat("f%d", i);
- if (align) {
- ret = PyArray_DescrAlignConverter(PyList_GET_ITEM(obj, i), &conv);
- }
- else {
- ret = PyArray_DescrConverter(PyList_GET_ITEM(obj, i), &conv);
- }
- if (ret == NPY_FAIL) {
- Py_DECREF(tup);
- Py_DECREF(key);
+ PyObject *fields = PyDict_New();
+ if (!fields) {
+ Py_DECREF(nameslist);
+ return NULL;
+ }
+
+ /* Types with fields need the Python C API for field access */
+ char dtypeflags = NPY_NEEDS_PYAPI;
+ int maxalign = 0;
+ int totalsize = 0;
+ for (int i = 0; i < n; i++) {
+ PyArray_Descr *conv = _convert_from_any(
+ PyList_GET_ITEM(obj, i), align);
+ if (conv == NULL) {
goto fail;
}
dtypeflags |= (conv->flags & NPY_FROM_FIELDS);
- PyTuple_SET_ITEM(tup, 0, (PyObject *)conv);
if (align) {
- int _align;
-
- _align = conv->alignment;
+ int _align = conv->alignment;
if (_align > 1) {
totalsize = NPY_NEXT_ALIGNED_OFFSET(totalsize, _align);
}
maxalign = PyArray_MAX(maxalign, _align);
}
- PyTuple_SET_ITEM(tup, 1, PyInt_FromLong((long) totalsize));
- PyDict_SetItem(fields, key, tup);
- Py_DECREF(tup);
+ PyObject *size_obj = PyInt_FromLong((long) totalsize);
+ if (!size_obj) {
+ Py_DECREF(conv);
+ goto fail;
+ }
+ PyObject *tup = PyTuple_New(2);
+ if (!tup) {
+ Py_DECREF(size_obj);
+ Py_DECREF(conv);
+ goto fail;
+ }
+ PyTuple_SET_ITEM(tup, 0, (PyObject *)conv);
+ PyTuple_SET_ITEM(tup, 1, size_obj);
+ PyObject *key = PyUString_FromFormat("f%d", i);
+ if (!key) {
+ Py_DECREF(tup);
+ goto fail;
+ }
+ /* steals a reference to key */
PyTuple_SET_ITEM(nameslist, i, key);
+ int ret = PyDict_SetItem(fields, key, tup);
+ Py_DECREF(tup);
+ if (ret < 0) {
+ goto fail;
+ }
totalsize += conv->elsize;
}
- new = PyArray_DescrNewFromType(NPY_VOID);
+ PyArray_Descr *new = PyArray_DescrNewFromType(NPY_VOID);
new->fields = fields;
new->names = nameslist;
new->flags = dtypeflags;
@@ -710,10 +726,7 @@ _convert_from_commastring(PyObject *obj, int align)
PyObject *listobj;
PyArray_Descr *res;
PyObject *_numpy_internal;
-
- if (!PyBytes_Check(obj)) {
- return NULL;
- }
+ assert(PyUnicode_Check(obj));
_numpy_internal = PyImport_ImportModule("numpy.core._internal");
if (_numpy_internal == NULL) {
return NULL;
@@ -730,22 +743,12 @@ _convert_from_commastring(PyObject *obj, int align)
return NULL;
}
if (PyList_GET_SIZE(listobj) == 1) {
- int retcode;
- retcode = PyArray_DescrConverter(PyList_GET_ITEM(listobj, 0),
- &res);
- if (retcode == NPY_FAIL) {
- res = NULL;
- }
+ res = _convert_from_any(PyList_GET_ITEM(listobj, 0), align);
}
else {
res = _convert_from_list(listobj, align);
}
Py_DECREF(listobj);
- if (!res && !PyErr_Occurred()) {
- PyErr_SetString(PyExc_ValueError,
- "invalid data-type");
- return NULL;
- }
return res;
}
@@ -766,7 +769,7 @@ _is_tuple_of_integers(PyObject *obj)
}
/*
- * helper function for _use_inherit to disallow dtypes of the form
+ * helper function for _try_convert_from_inherit_tuple to disallow dtypes of the form
* (old_dtype, new_dtype) where either of the dtypes contains python
* objects - these dtypes are not useful and can be a source of segfaults,
* when an attempt is made to interpret a python object as a different dtype
@@ -775,7 +778,7 @@ _is_tuple_of_integers(PyObject *obj)
* people have been using to add a field to an object array without fields
*/
static int
-invalid_union_object_dtype(PyArray_Descr *new, PyArray_Descr *conv)
+_validate_union_object_dtype(PyArray_Descr *new, PyArray_Descr *conv)
{
PyObject *name, *tup;
PyArray_Descr *dtype;
@@ -793,8 +796,12 @@ invalid_union_object_dtype(PyArray_Descr *new, PyArray_Descr *conv)
if (name == NULL) {
return -1;
}
- tup = PyDict_GetItem(conv->fields, name);
+ tup = PyDict_GetItemWithError(conv->fields, name);
if (tup == NULL) {
+ if (!PyErr_Occurred()) {
+ /* fields was missing the name it claimed to contain */
+ PyErr_BadInternalCall();
+ }
return -1;
}
dtype = (PyArray_Descr *)PyTuple_GET_ITEM(tup, 0);
@@ -827,21 +834,26 @@ fail:
* a['real'] and a['imag'] to an int32 array.
*
* leave type reference alone
+ *
+ * Returns `Py_NotImplemented` if the second tuple item is not
+ * appropriate.
*/
static PyArray_Descr *
-_use_inherit(PyArray_Descr *type, PyObject *newobj, int *errflag)
+_try_convert_from_inherit_tuple(PyArray_Descr *type, PyObject *newobj)
{
- PyArray_Descr *new;
- PyArray_Descr *conv;
-
- *errflag = 0;
- if (PyArray_IsScalar(newobj, Integer)
- || _is_tuple_of_integers(newobj)
- || !PyArray_DescrConverter(newobj, &conv)) {
- return NULL;
+ if (PyArray_IsScalar(newobj, Integer) || _is_tuple_of_integers(newobj)) {
+ /* It's a subarray or flexible type instead */
+ Py_INCREF(Py_NotImplemented);
+ return (PyArray_Descr *)Py_NotImplemented;
+ }
+ PyArray_Descr *conv = _convert_from_any(newobj, 0);
+ if (conv == NULL) {
+ /* Let someone else try to convert this */
+ PyErr_Clear();
+ Py_INCREF(Py_NotImplemented);
+ return (PyArray_Descr *)Py_NotImplemented;
}
- *errflag = 1;
- new = PyArray_DescrNew(type);
+ PyArray_Descr *new = PyArray_DescrNew(type);
if (new == NULL) {
goto fail;
}
@@ -854,7 +866,7 @@ _use_inherit(PyArray_Descr *type, PyObject *newobj, int *errflag)
Py_DECREF(new);
goto fail;
}
- else if (invalid_union_object_dtype(new, conv)) {
+ else if (_validate_union_object_dtype(new, conv) < 0) {
Py_DECREF(new);
goto fail;
}
@@ -875,7 +887,6 @@ _use_inherit(PyArray_Descr *type, PyObject *newobj, int *errflag)
}
new->flags = conv->flags;
Py_DECREF(conv);
- *errflag = 0;
return new;
fail:
@@ -895,7 +906,7 @@ _use_inherit(PyArray_Descr *type, PyObject *newobj, int *errflag)
* Returns 0 on success, -1 if an exception is raised.
*/
static int
-validate_object_field_overlap(PyArray_Descr *dtype)
+_validate_object_field_overlap(PyArray_Descr *dtype)
{
PyObject *names, *fields, *key, *tup, *title;
Py_ssize_t i, j, names_size;
@@ -912,8 +923,12 @@ validate_object_field_overlap(PyArray_Descr *dtype)
if (key == NULL) {
return -1;
}
- tup = PyDict_GetItem(fields, key);
+ tup = PyDict_GetItemWithError(fields, key);
if (tup == NULL) {
+ if (!PyErr_Occurred()) {
+ /* fields was missing the name it claimed to contain */
+ PyErr_BadInternalCall();
+ }
return -1;
}
if (!PyArg_ParseTuple(tup, "Oi|O", &fld_dtype, &fld_offset, &title)) {
@@ -928,8 +943,12 @@ validate_object_field_overlap(PyArray_Descr *dtype)
if (key == NULL) {
return -1;
}
- tup = PyDict_GetItem(fields, key);
+ tup = PyDict_GetItemWithError(fields, key);
if (tup == NULL) {
+ if (!PyErr_Occurred()) {
+ /* fields was missing the name it claimed to contain */
+ PyErr_BadInternalCall();
+ }
return -1;
}
if (!PyArg_ParseTuple(tup, "Oi|O", &fld2_dtype,
@@ -991,7 +1010,7 @@ validate_object_field_overlap(PyArray_Descr *dtype)
* then it will be checked for conformity and used directly.
*/
static PyArray_Descr *
-_use_fields_dict(PyObject *obj, int align)
+_convert_from_field_dict(PyObject *obj, int align)
{
PyObject *_numpy_internal;
PyArray_Descr *res;
@@ -1012,48 +1031,34 @@ _use_fields_dict(PyObject *obj, int align)
static PyArray_Descr *
_convert_from_dict(PyObject *obj, int align)
{
- PyArray_Descr *new;
- PyObject *fields = NULL;
- PyObject *names = NULL;
- PyObject *offsets= NULL;
- PyObject *descrs = NULL;
- PyObject *titles = NULL;
- PyObject *metadata, *tmp;
- int n, i;
- int totalsize, itemsize;
- int maxalign = 0;
- /* Types with fields need the Python C API for field access */
- char dtypeflags = NPY_NEEDS_PYAPI;
- int has_out_of_order_fields = 0;
-
- fields = PyDict_New();
+ PyObject *fields = PyDict_New();
if (fields == NULL) {
return (PyArray_Descr *)PyErr_NoMemory();
}
/*
* Use PyMapping_GetItemString to support dictproxy objects as well.
*/
- names = PyMapping_GetItemString(obj, "names");
+ PyObject *names = PyMapping_GetItemString(obj, "names");
if (names == NULL) {
Py_DECREF(fields);
/* XXX should check this is a KeyError */
PyErr_Clear();
- return _use_fields_dict(obj, align);
+ return _convert_from_field_dict(obj, align);
}
- descrs = PyMapping_GetItemString(obj, "formats");
+ PyObject *descrs = PyMapping_GetItemString(obj, "formats");
if (descrs == NULL) {
Py_DECREF(fields);
/* XXX should check this is a KeyError */
PyErr_Clear();
Py_DECREF(names);
- return _use_fields_dict(obj, align);
+ return _convert_from_field_dict(obj, align);
}
- n = PyObject_Length(names);
- offsets = PyMapping_GetItemString(obj, "offsets");
+ int n = PyObject_Length(names);
+ PyObject *offsets = PyMapping_GetItemString(obj, "offsets");
if (!offsets) {
PyErr_Clear();
}
- titles = PyMapping_GetItemString(obj, "titles");
+ PyObject *titles = PyMapping_GetItemString(obj, "titles");
if (!titles) {
PyErr_Clear();
}
@@ -1071,7 +1076,7 @@ _convert_from_dict(PyObject *obj, int align)
* If a property 'aligned' is in the dict, it overrides the align flag
* to be True if it not already true.
*/
- tmp = PyMapping_GetItemString(obj, "aligned");
+ PyObject *tmp = PyMapping_GetItemString(obj, "aligned");
if (tmp == NULL) {
PyErr_Clear();
} else {
@@ -1088,16 +1093,16 @@ _convert_from_dict(PyObject *obj, int align)
Py_DECREF(tmp);
}
- totalsize = 0;
- for (i = 0; i < n; i++) {
- PyObject *tup, *descr, *ind, *title, *name, *off;
- int len, ret, _align = 1;
- PyArray_Descr *newdescr;
-
+ /* Types with fields need the Python C API for field access */
+ char dtypeflags = NPY_NEEDS_PYAPI;
+ int totalsize = 0;
+ int maxalign = 0;
+ int has_out_of_order_fields = 0;
+ for (int i = 0; i < n; i++) {
/* Build item to insert (descr, offset, [title])*/
- len = 2;
- title = NULL;
- ind = PyInt_FromLong(i);
+ int len = 2;
+ PyObject *title = NULL;
+ PyObject *ind = PyInt_FromLong(i);
if (titles) {
title=PyObject_GetItem(titles, ind);
if (title && title != Py_None) {
@@ -1108,39 +1113,34 @@ _convert_from_dict(PyObject *obj, int align)
}
PyErr_Clear();
}
- tup = PyTuple_New(len);
- descr = PyObject_GetItem(descrs, ind);
+ PyObject *tup = PyTuple_New(len);
+ PyObject *descr = PyObject_GetItem(descrs, ind);
if (!descr) {
Py_DECREF(tup);
Py_DECREF(ind);
goto fail;
}
- if (align) {
- ret = PyArray_DescrAlignConverter(descr, &newdescr);
- }
- else {
- ret = PyArray_DescrConverter(descr, &newdescr);
- }
+ PyArray_Descr *newdescr = _convert_from_any(descr, align);
Py_DECREF(descr);
- if (ret == NPY_FAIL) {
+ if (newdescr == NULL) {
Py_DECREF(tup);
Py_DECREF(ind);
goto fail;
}
PyTuple_SET_ITEM(tup, 0, (PyObject *)newdescr);
+ int _align = 1;
if (align) {
_align = newdescr->alignment;
maxalign = PyArray_MAX(maxalign,_align);
}
if (offsets) {
- long offset;
- off = PyObject_GetItem(offsets, ind);
+ PyObject *off = PyObject_GetItem(offsets, ind);
if (!off) {
Py_DECREF(tup);
Py_DECREF(ind);
goto fail;
}
- offset = PyArray_PyIntAsInt(off);
+ long offset = PyArray_PyIntAsInt(off);
if (error_converting(offset)) {
Py_DECREF(off);
Py_DECREF(tup);
@@ -1149,8 +1149,8 @@ _convert_from_dict(PyObject *obj, int align)
}
Py_DECREF(off);
if (offset < 0) {
- PyErr_Format(PyExc_ValueError, "offset %d cannot be negative",
- (int)offset);
+ PyErr_Format(PyExc_ValueError, "offset %ld cannot be negative",
+ offset);
Py_DECREF(tup);
Py_DECREF(ind);
goto fail;
@@ -1164,11 +1164,13 @@ _convert_from_dict(PyObject *obj, int align)
/* If align=True, enforce field alignment */
if (align && offset % newdescr->alignment != 0) {
PyErr_Format(PyExc_ValueError,
- "offset %d for NumPy dtype with fields is "
+ "offset %ld for NumPy dtype with fields is "
"not divisible by the field alignment %d "
"with align=True",
- (int)offset, (int)newdescr->alignment);
- ret = NPY_FAIL;
+ offset, newdescr->alignment);
+ Py_DECREF(ind);
+ Py_DECREF(tup);
+ goto fail;
}
else if (offset + newdescr->elsize > totalsize) {
totalsize = offset + newdescr->elsize;
@@ -1181,15 +1183,10 @@ _convert_from_dict(PyObject *obj, int align)
PyTuple_SET_ITEM(tup, 1, PyInt_FromLong(totalsize));
totalsize += newdescr->elsize;
}
- if (ret == NPY_FAIL) {
- Py_DECREF(ind);
- Py_DECREF(tup);
- goto fail;
- }
if (len == 3) {
PyTuple_SET_ITEM(tup, 2, title);
}
- name = PyObject_GetItem(names, ind);
+ PyObject *name = PyObject_GetItem(names, ind);
Py_DECREF(ind);
if (!name) {
Py_DECREF(tup);
@@ -1203,33 +1200,46 @@ _convert_from_dict(PyObject *obj, int align)
}
/* Insert into dictionary */
- if (PyDict_GetItem(fields, name) != NULL) {
+ if (PyDict_GetItemWithError(fields, name) != NULL) {
PyErr_SetString(PyExc_ValueError,
"name already used as a name or title");
Py_DECREF(tup);
goto fail;
}
- PyDict_SetItem(fields, name, tup);
+ else if (PyErr_Occurred()) {
+ /* MemoryError during dict lookup */
+ Py_DECREF(tup);
+ goto fail;
+ }
+ int ret = PyDict_SetItem(fields, name, tup);
Py_DECREF(name);
+ if (ret < 0) {
+ Py_DECREF(tup);
+ goto fail;
+ }
if (len == 3) {
if (PyBaseString_Check(title)) {
- if (PyDict_GetItem(fields, title) != NULL) {
+ if (PyDict_GetItemWithError(fields, title) != NULL) {
PyErr_SetString(PyExc_ValueError,
"title already used as a name or title.");
Py_DECREF(tup);
goto fail;
}
- PyDict_SetItem(fields, title, tup);
+ else if (PyErr_Occurred()) {
+ /* MemoryError during dict lookup */
+ goto fail;
+ }
+ if (PyDict_SetItem(fields, title, tup) < 0) {
+ Py_DECREF(tup);
+ goto fail;
+ }
}
}
Py_DECREF(tup);
- if (ret == NPY_FAIL) {
- goto fail;
- }
dtypeflags |= (newdescr->flags & NPY_FROM_FIELDS);
}
- new = PyArray_DescrNewFromType(NPY_VOID);
+ PyArray_Descr *new = PyArray_DescrNewFromType(NPY_VOID);
if (new == NULL) {
goto fail;
}
@@ -1259,7 +1269,7 @@ _convert_from_dict(PyObject *obj, int align)
* need to verify that no OBJECT types overlap with something else.
*/
if (has_out_of_order_fields && PyDataType_REFCHK(new)) {
- if (validate_object_field_overlap(new) < 0) {
+ if (_validate_object_field_overlap(new) < 0) {
Py_DECREF(new);
goto fail;
}
@@ -1275,7 +1285,7 @@ _convert_from_dict(PyObject *obj, int align)
if (tmp == NULL) {
PyErr_Clear();
} else {
- itemsize = (int)PyArray_PyIntAsInt(tmp);
+ int itemsize = (int)PyArray_PyIntAsInt(tmp);
Py_DECREF(tmp);
if (error_converting(itemsize)) {
Py_DECREF(new);
@@ -1286,7 +1296,7 @@ _convert_from_dict(PyObject *obj, int align)
PyErr_Format(PyExc_ValueError,
"NumPy dtype descriptor requires %d bytes, "
"cannot override to smaller itemsize of %d",
- (int)new->elsize, (int)itemsize);
+ new->elsize, itemsize);
Py_DECREF(new);
goto fail;
}
@@ -1295,7 +1305,7 @@ _convert_from_dict(PyObject *obj, int align)
PyErr_Format(PyExc_ValueError,
"NumPy dtype descriptor requires alignment of %d bytes, "
"which is not divisible into the specified itemsize %d",
- (int)new->alignment, (int)itemsize);
+ new->alignment, itemsize);
Py_DECREF(new);
goto fail;
}
@@ -1304,7 +1314,7 @@ _convert_from_dict(PyObject *obj, int align)
}
/* Add the metadata if provided */
- metadata = PyMapping_GetItemString(obj, "metadata");
+ PyObject *metadata = PyMapping_GetItemString(obj, "metadata");
if (metadata == NULL) {
PyErr_Clear();
@@ -1366,385 +1376,352 @@ PyArray_DescrConverter2(PyObject *obj, PyArray_Descr **at)
}
}
-/*NUMPY_API
- * Get typenum from an object -- None goes to NPY_DEFAULT_TYPE
- * This function takes a Python object representing a type and converts it
- * to a the correct PyArray_Descr * structure to describe the type.
- *
- * Many objects can be used to represent a data-type which in NumPy is
- * quite a flexible concept.
- *
- * This is the central code that converts Python objects to
- * Type-descriptor objects that are used throughout numpy.
- *
- * Returns a new reference in *at, but the returned should not be
- * modified as it may be one of the canonical immutable objects or
- * a reference to the input obj.
+/**
+ * Get a dtype instance from a python type
*/
-NPY_NO_EXPORT int
-PyArray_DescrConverter(PyObject *obj, PyArray_Descr **at)
-{
- int check_num = NPY_NOTYPE + 10;
- int elsize = 0;
- char endian = '=';
-
- *at = NULL;
+static PyArray_Descr *
+_convert_from_type(PyObject *obj) {
+ PyTypeObject *typ = (PyTypeObject*)obj;
- /* default */
- if (obj == Py_None) {
- *at = PyArray_DescrFromType(NPY_DEFAULT_TYPE);
- return NPY_SUCCEED;
+ if (PyType_IsSubtype(typ, &PyGenericArrType_Type)) {
+ return PyArray_DescrFromTypeObject(obj);
}
-
- if (PyArray_DescrCheck(obj)) {
- *at = (PyArray_Descr *)obj;
- Py_INCREF(*at);
- return NPY_SUCCEED;
+ else if (typ == &PyLong_Type) {
+ return PyArray_DescrFromType(NPY_LONG);
}
-
- if (PyType_Check(obj)) {
- if (PyType_IsSubtype((PyTypeObject *)obj, &PyGenericArrType_Type)) {
- *at = PyArray_DescrFromTypeObject(obj);
- return (*at) ? NPY_SUCCEED : NPY_FAIL;
- }
- check_num = NPY_OBJECT;
-#if !defined(NPY_PY3K)
- if (obj == (PyObject *)(&PyInt_Type)) {
- check_num = NPY_LONG;
- }
- else if (obj == (PyObject *)(&PyLong_Type)) {
- check_num = NPY_LONGLONG;
- }
-#else
- if (obj == (PyObject *)(&PyLong_Type)) {
- check_num = NPY_LONG;
- }
-#endif
- else if (obj == (PyObject *)(&PyFloat_Type)) {
- check_num = NPY_DOUBLE;
- }
- else if (obj == (PyObject *)(&PyComplex_Type)) {
- check_num = NPY_CDOUBLE;
- }
- else if (obj == (PyObject *)(&PyBool_Type)) {
- check_num = NPY_BOOL;
- }
- else if (obj == (PyObject *)(&PyBytes_Type)) {
- check_num = NPY_STRING;
- }
- else if (obj == (PyObject *)(&PyUnicode_Type)) {
- check_num = NPY_UNICODE;
- }
-#if defined(NPY_PY3K)
- else if (obj == (PyObject *)(&PyMemoryView_Type)) {
-#else
- else if (obj == (PyObject *)(&PyBuffer_Type)) {
-#endif
- check_num = NPY_VOID;
+ else if (typ == &PyFloat_Type) {
+ return PyArray_DescrFromType(NPY_DOUBLE);
+ }
+ else if (typ == &PyComplex_Type) {
+ return PyArray_DescrFromType(NPY_CDOUBLE);
+ }
+ else if (typ == &PyBool_Type) {
+ return PyArray_DescrFromType(NPY_BOOL);
+ }
+ else if (typ == &PyBytes_Type) {
+ /*
+ * TODO: This should be deprecated, and have special handling for
+ * dtype=bytes/"S" in coercion: It should not rely on "S0".
+ */
+ return PyArray_DescrFromType(NPY_STRING);
+ }
+ else if (typ == &PyUnicode_Type) {
+ /*
+ * TODO: This should be deprecated, and have special handling for
+ * dtype=str/"U" in coercion: It should not rely on "U0".
+ */
+ return PyArray_DescrFromType(NPY_UNICODE);
+ }
+ else if (typ == &PyMemoryView_Type) {
+ return PyArray_DescrFromType(NPY_VOID);
+ }
+ else if (typ == &PyBaseObject_Type) {
+ return PyArray_DescrFromType(NPY_OBJECT);
+ }
+ else {
+ PyArray_Descr *ret = _try_convert_from_dtype_attr(obj);
+ if ((PyObject *)ret != Py_NotImplemented) {
+ return ret;
}
- else {
- if (_arraydescr_from_dtype_attr(obj, at)) {
- /*
- * Using dtype attribute, *at may be NULL if a
- * RecursionError occurred.
- */
- if (*at == NULL) {
- goto error;
- }
- return NPY_SUCCEED;
- }
- /*
- * Note: this comes after _arraydescr_from_dtype_attr because the ctypes
- * type might override the dtype if numpy does not otherwise
- * support it.
- */
- if (npy_ctypes_check((PyTypeObject *)obj)) {
- *at = _arraydescr_from_ctypes_type((PyTypeObject *)obj);
- return *at ? NPY_SUCCEED : NPY_FAIL;
- }
+ Py_DECREF(ret);
+
+ /*
+ * Note: this comes after _try_convert_from_dtype_attr because the ctypes
+ * type might override the dtype if numpy does not otherwise
+ * support it.
+ */
+ ret = _try_convert_from_ctypes_type(typ);
+ if ((PyObject *)ret != Py_NotImplemented) {
+ return ret;
}
- goto finish;
+ Py_DECREF(ret);
+
+ /*
+ * All other classes are treated as object. This can be convenient
+ * to convey an intention of using it for a specific python type
+ * and possibly allow converting to a new type-specific dtype in the future. It may make sense to
+ * allow this only within `dtype=...` keyword argument context
+ * in the future.
+ */
+ return PyArray_DescrFromType(NPY_OBJECT);
}
+}
- /* or a typecode string */
- if (PyUnicode_Check(obj)) {
- /* Allow unicode format strings: convert to bytes */
- int retval;
- PyObject *obj2;
- obj2 = PyUnicode_AsASCIIString(obj);
+static PyArray_Descr *
+_convert_from_str(PyObject *obj, int align);
+
+static PyArray_Descr *
+_convert_from_any(PyObject *obj, int align)
+{
+ /* default */
+ if (obj == Py_None) {
+ return PyArray_DescrFromType(NPY_DEFAULT_TYPE);
+ }
+ else if (PyArray_DescrCheck(obj)) {
+ PyArray_Descr *ret = (PyArray_Descr *)obj;
+ Py_INCREF(ret);
+ return ret;
+ }
+ else if (PyType_Check(obj)) {
+ return _convert_from_type(obj);
+ }
+ /* or a typecode string */
+ else if (PyBytes_Check(obj)) {
+ /* Allow bytes format strings: convert to unicode */
+ PyObject *obj2 = PyUnicode_FromEncodedObject(obj, NULL, NULL);
if (obj2 == NULL) {
/* Convert the exception into a TypeError */
- PyObject *err = PyErr_Occurred();
- if (PyErr_GivenExceptionMatches(err, PyExc_UnicodeEncodeError)) {
+ if (PyErr_ExceptionMatches(PyExc_UnicodeDecodeError)) {
PyErr_SetString(PyExc_TypeError,
"data type not understood");
}
- return NPY_FAIL;
+ return NULL;
}
- retval = PyArray_DescrConverter(obj2, at);
+ PyArray_Descr *ret = _convert_from_str(obj2, align);
Py_DECREF(obj2);
- return retval;
+ return ret;
}
-
- if (PyBytes_Check(obj)) {
- char *type = NULL;
- Py_ssize_t len = 0;
-
- /* Check for a string typecode. */
- if (PyBytes_AsStringAndSize(obj, &type, &len) < 0) {
- goto error;
- }
-
- /* Empty string is invalid */
- if (len == 0) {
- goto fail;
- }
-
- /* check for commas present or first (or second) element a digit */
- if (_check_for_commastring(type, len)) {
- *at = _convert_from_commastring(obj, 0);
- return (*at) ? NPY_SUCCEED : NPY_FAIL;
- }
-
- /* Process the endian character. '|' is replaced by '='*/
- switch (type[0]) {
- case '>':
- case '<':
- case '=':
- endian = type[0];
- ++type;
- --len;
- break;
-
- case '|':
- endian = '=';
- ++type;
- --len;
- break;
- }
-
- /* Just an endian character is invalid */
- if (len == 0) {
- goto fail;
- }
-
- /* Check for datetime format */
- if (is_datetime_typestr(type, len)) {
- *at = parse_dtype_from_datetime_typestr(type, len);
- if (*at == NULL) {
- return NPY_FAIL;
- }
- /* *at has byte order '=' at this point */
- if (!PyArray_ISNBO(endian)) {
- (*at)->byteorder = endian;
- }
- return NPY_SUCCEED;
- }
-
- /* A typecode like 'd' */
- if (len == 1) {
- /* Python byte string characters are unsigned */
- check_num = (unsigned char) type[0];
- }
- /* A kind + size like 'f8' */
- else {
- char *typeend = NULL;
- int kind;
-
- /* Parse the integer, make sure it's the rest of the string */
- elsize = (int)strtol(type + 1, &typeend, 10);
- if (typeend - type == len) {
-
- kind = type[0];
- switch (kind) {
- case NPY_STRINGLTR:
- case NPY_STRINGLTR2:
- check_num = NPY_STRING;
- break;
-
- /*
- * When specifying length of UNICODE
- * the number of characters is given to match
- * the STRING interface. Each character can be
- * more than one byte and itemsize must be
- * the number of bytes.
- */
- case NPY_UNICODELTR:
- check_num = NPY_UNICODE;
- elsize <<= 2;
- break;
-
- case NPY_VOIDLTR:
- check_num = NPY_VOID;
- break;
-
- default:
- if (elsize == 0) {
- check_num = NPY_NOTYPE+10;
- }
- /* Support for generic processing c8, i4, f8, etc...*/
- else {
- check_num = PyArray_TypestrConvert(elsize, kind);
- if (check_num == NPY_NOTYPE) {
- check_num += 10;
- }
- elsize = 0;
- }
- }
- }
- }
+ else if (PyUnicode_Check(obj)) {
+ return _convert_from_str(obj, align);
}
else if (PyTuple_Check(obj)) {
/* or a tuple */
- *at = _convert_from_tuple(obj, 0);
- if (*at == NULL){
- if (PyErr_Occurred()) {
- return NPY_FAIL;
- }
- goto fail;
- }
- return NPY_SUCCEED;
+ return _convert_from_tuple(obj, align);
}
else if (PyList_Check(obj)) {
/* or a list */
- *at = _convert_from_array_descr(obj,0);
- if (*at == NULL) {
- if (PyErr_Occurred()) {
- return NPY_FAIL;
- }
- goto fail;
- }
- return NPY_SUCCEED;
+ return _convert_from_array_descr(obj, align);
}
else if (PyDict_Check(obj) || PyDictProxy_Check(obj)) {
/* or a dictionary */
- *at = _convert_from_dict(obj,0);
- if (*at == NULL) {
- if (PyErr_Occurred()) {
- return NPY_FAIL;
- }
- goto fail;
- }
- return NPY_SUCCEED;
+ return _convert_from_dict(obj, align);
}
else if (PyArray_Check(obj)) {
- goto fail;
+ PyErr_SetString(PyExc_TypeError, "Cannot construct a dtype from an array");
+ return NULL;
}
else {
- if (_arraydescr_from_dtype_attr(obj, at)) {
- /*
- * Using dtype attribute, *at may be NULL if a
- * RecursionError occurred.
- */
- if (*at == NULL) {
- goto error;
- }
- return NPY_SUCCEED;
+ PyArray_Descr *ret = _try_convert_from_dtype_attr(obj);
+ if ((PyObject *)ret != Py_NotImplemented) {
+ return ret;
}
+ Py_DECREF(ret);
/*
- * Note: this comes after _arraydescr_from_dtype_attr because the ctypes
+ * Note: this comes after _try_convert_from_dtype_attr because the ctypes
* type might override the dtype if numpy does not otherwise
* support it.
*/
- if (npy_ctypes_check(Py_TYPE(obj))) {
- *at = _arraydescr_from_ctypes_type(Py_TYPE(obj));
- return *at ? NPY_SUCCEED : NPY_FAIL;
+ ret = _try_convert_from_ctypes_type(Py_TYPE(obj));
+ if ((PyObject *)ret != Py_NotImplemented) {
+ return ret;
}
+ Py_DECREF(ret);
+ PyErr_Format(PyExc_TypeError, "Cannot interpret '%R' as a data type", obj);
+ return NULL;
+ }
+}
+
+
+/*NUMPY_API
+ * Get typenum from an object -- None goes to NPY_DEFAULT_TYPE
+ * This function takes a Python object representing a type and converts it
+ * to the correct PyArray_Descr * structure to describe the type.
+ *
+ * Many objects can be used to represent a data-type which in NumPy is
+ * quite a flexible concept.
+ *
+ * This is the central code that converts Python objects to
+ * Type-descriptor objects that are used throughout numpy.
+ *
+ * Returns a new reference in *at, but the returned descriptor should
+ * not be modified, as it may be one of the canonical immutable objects
+ * or a reference to the input obj.
+ */
+NPY_NO_EXPORT int
+PyArray_DescrConverter(PyObject *obj, PyArray_Descr **at)
+{
+ *at = _convert_from_any(obj, 0);
+ return (*at) ? NPY_SUCCEED : NPY_FAIL;
+}
+
+/** Convert a bytestring specification into a dtype */
+static PyArray_Descr *
+_convert_from_str(PyObject *obj, int align)
+{
+ /* Check for a string typecode. */
+ Py_ssize_t len = 0;
+ char const *type = PyUnicode_AsUTF8AndSize(obj, &len);
+ if (type == NULL) {
+ return NULL;
+ }
+
+ /* Empty string is invalid */
+ if (len == 0) {
+ goto fail;
+ }
+
+ /* check for commas present or first (or second) element a digit */
+ if (_check_for_commastring(type, len)) {
+ return _convert_from_commastring(obj, align);
+ }
+
+ /* Process the endian character. '|' is replaced by '='*/
+ char endian = '=';
+ switch (type[0]) {
+ case '>':
+ case '<':
+ case '=':
+ endian = type[0];
+ ++type;
+ --len;
+ break;
+
+ case '|':
+ endian = '=';
+ ++type;
+ --len;
+ break;
+ }
+
+ /* Just an endian character is invalid */
+ if (len == 0) {
goto fail;
}
+
+ /* Check for datetime format */
+ if (is_datetime_typestr(type, len)) {
+ PyArray_Descr *ret = parse_dtype_from_datetime_typestr(type, len);
+ if (ret == NULL) {
+ return NULL;
+ }
+ /* ret has byte order '=' at this point */
+ if (!PyArray_ISNBO(endian)) {
+ ret->byteorder = endian;
+ }
+ return ret;
+ }
+
+ int check_num = NPY_NOTYPE + 10;
+ int elsize = 0;
+ /* A typecode like 'd' */
+ if (len == 1) {
+ /* Python byte string characters are unsigned */
+ check_num = (unsigned char) type[0];
+ }
+ /* A kind + size like 'f8' */
+ else {
+ char *typeend = NULL;
+ int kind;
+
+ /* Parse the integer, make sure it's the rest of the string */
+ elsize = (int)strtol(type + 1, &typeend, 10);
+ if (typeend - type == len) {
+
+ kind = type[0];
+ switch (kind) {
+ case NPY_STRINGLTR:
+ case NPY_STRINGLTR2:
+ check_num = NPY_STRING;
+ break;
+
+ /*
+ * When specifying length of UNICODE
+ * the number of characters is given to match
+ * the STRING interface. Each character can be
+ * more than one byte and itemsize must be
+ * the number of bytes.
+ */
+ case NPY_UNICODELTR:
+ check_num = NPY_UNICODE;
+ elsize <<= 2;
+ break;
+
+ case NPY_VOIDLTR:
+ check_num = NPY_VOID;
+ break;
+
+ default:
+ if (elsize == 0) {
+ check_num = NPY_NOTYPE+10;
+ }
+ /* Support for generic processing c8, i4, f8, etc...*/
+ else {
+ check_num = PyArray_TypestrConvert(elsize, kind);
+ if (check_num == NPY_NOTYPE) {
+ check_num += 10;
+ }
+ elsize = 0;
+ }
+ }
+ }
+ }
+
if (PyErr_Occurred()) {
goto fail;
}
-finish:
+ PyArray_Descr *ret;
if ((check_num == NPY_NOTYPE + 10) ||
- (*at = PyArray_DescrFromType(check_num)) == NULL) {
+ (ret = PyArray_DescrFromType(check_num)) == NULL) {
PyErr_Clear();
/* Now check to see if the object is registered in typeDict */
- if (typeDict != NULL) {
- PyObject *item = NULL;
-#if defined(NPY_PY3K)
- if (PyBytes_Check(obj)) {
- PyObject *tmp;
- tmp = PyUnicode_FromEncodedObject(obj, "ascii", "strict");
- if (tmp == NULL) {
- goto fail;
- }
- item = PyDict_GetItem(typeDict, tmp);
- Py_DECREF(tmp);
- }
- else {
- item = PyDict_GetItem(typeDict, obj);
+ if (typeDict == NULL) {
+ goto fail;
+ }
+ PyObject *item = PyDict_GetItemWithError(typeDict, obj);
+ if (item == NULL) {
+ if (PyErr_Occurred()) {
+ return NULL;
}
-#else
- item = PyDict_GetItem(typeDict, obj);
-#endif
- if (item) {
- /* Check for a deprecated Numeric-style typecode */
- if (PyBytes_Check(obj)) {
- char *type = NULL;
- Py_ssize_t len = 0;
- char *dep_tps[] = {"Bool", "Complex", "Float", "Int",
- "Object0", "String0", "Timedelta64",
- "Unicode0", "UInt", "Void0"};
- int ndep_tps = sizeof(dep_tps) / sizeof(dep_tps[0]);
- int i;
-
- if (PyBytes_AsStringAndSize(obj, &type, &len) < 0) {
- goto error;
- }
- for (i = 0; i < ndep_tps; ++i) {
- char *dep_tp = dep_tps[i];
-
- if (strncmp(type, dep_tp, strlen(dep_tp)) == 0) {
- if (DEPRECATE("Numeric-style type codes are "
- "deprecated and will result in "
- "an error in the future.") < 0) {
- goto fail;
- }
- }
- }
+ goto fail;
+ }
+
+ /* Check for a deprecated Numeric-style typecode */
+ char *dep_tps[] = {"Bool", "Complex", "Float", "Int",
+ "Object0", "String0", "Timedelta64",
+ "Unicode0", "UInt", "Void0"};
+ int ndep_tps = sizeof(dep_tps) / sizeof(dep_tps[0]);
+ for (int i = 0; i < ndep_tps; ++i) {
+ char *dep_tp = dep_tps[i];
+
+ if (strncmp(type, dep_tp, strlen(dep_tp)) == 0) {
+ if (DEPRECATE("Numeric-style type codes are "
+ "deprecated and will result in "
+ "an error in the future.") < 0) {
+ goto fail;
}
- return PyArray_DescrConverter(item, at);
}
}
- goto fail;
+ /*
+ * Probably only ever dispatches to `_convert_from_type`, but who
+ * knows what users are injecting into `np.typeDict`.
+ */
+ return _convert_from_any(item, align);
}
- if (PyDataType_ISUNSIZED(*at) && (*at)->elsize != elsize) {
- PyArray_DESCR_REPLACE(*at);
- if (*at == NULL) {
- goto error;
+ if (PyDataType_ISUNSIZED(ret) && ret->elsize != elsize) {
+ PyArray_DESCR_REPLACE(ret);
+ if (ret == NULL) {
+ return NULL;
}
- (*at)->elsize = elsize;
+ ret->elsize = elsize;
}
if (endian != '=' && PyArray_ISNBO(endian)) {
endian = '=';
}
- if (endian != '=' && (*at)->byteorder != '|'
- && (*at)->byteorder != endian) {
- PyArray_DESCR_REPLACE(*at);
- if (*at == NULL) {
- goto error;
+ if (endian != '=' && ret->byteorder != '|' && ret->byteorder != endian) {
+ PyArray_DESCR_REPLACE(ret);
+ if (ret == NULL) {
+ return NULL;
}
- (*at)->byteorder = endian;
+ ret->byteorder = endian;
}
- return NPY_SUCCEED;
+ return ret;
fail:
- if (PyBytes_Check(obj)) {
- PyErr_Format(PyExc_TypeError,
- "data type \"%s\" not understood", PyBytes_AS_STRING(obj));
- }
- else {
- PyErr_SetString(PyExc_TypeError,
- "data type not understood");
- }
-
-error:
- *at = NULL;
- return NPY_FAIL;
+ PyErr_Format(PyExc_TypeError, "data type %R not understood", obj);
+ return NULL;
}
/** Array Descr Objects for dynamic types **/
@@ -2038,7 +2015,7 @@ _arraydescr_isnative(PyArray_Descr *self)
int offset;
Py_ssize_t pos = 0;
while (PyDict_Next(self->fields, &pos, &key, &value)) {
- if NPY_TITLE_KEY(key, value) {
+ if (NPY_TITLE_KEY(key, value)) {
continue;
}
if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, &title)) {
@@ -2179,7 +2156,14 @@ arraydescr_names_set(PyArray_Descr *self, PyObject *val)
self->hash = -1;
/* Update dictionary keys in fields */
new_names = PySequence_Tuple(val);
+ if (new_names == NULL) {
+ return -1;
+ }
new_fields = PyDict_New();
+ if (new_fields == NULL) {
+ Py_DECREF(new_names);
+ return -1;
+ }
for (i = 0; i < N; i++) {
PyObject *key;
PyObject *item;
@@ -2187,20 +2171,35 @@ arraydescr_names_set(PyArray_Descr *self, PyObject *val)
int ret;
key = PyTuple_GET_ITEM(self->names, i);
/* Borrowed references to item and new_key */
- item = PyDict_GetItem(self->fields, key);
+ item = PyDict_GetItemWithError(self->fields, key);
+ if (item == NULL) {
+ if (!PyErr_Occurred()) {
+ /* fields was missing the name it claimed to contain */
+ PyErr_BadInternalCall();
+ }
+ Py_DECREF(new_names);
+ Py_DECREF(new_fields);
+ return -1;
+ }
new_key = PyTuple_GET_ITEM(new_names, i);
/* Check for duplicates */
ret = PyDict_Contains(new_fields, new_key);
- if (ret != 0) {
- if (ret < 0) {
- PyErr_Clear();
- }
+ if (ret < 0) {
+ Py_DECREF(new_names);
+ Py_DECREF(new_fields);
+ return -1;
+ }
+ else if (ret != 0) {
PyErr_SetString(PyExc_ValueError, "Duplicate field names given.");
Py_DECREF(new_names);
Py_DECREF(new_fields);
return -1;
}
- PyDict_SetItem(new_fields, new_key, item);
+ if (PyDict_SetItem(new_fields, new_key, item) < 0) {
+ Py_DECREF(new_names);
+ Py_DECREF(new_fields);
+ return -1;
+ }
}
/* Replace names */
@@ -2281,12 +2280,8 @@ arraydescr_new(PyTypeObject *NPY_UNUSED(subtype),
return NULL;
}
- if (align) {
- if (!PyArray_DescrAlignConverter(odescr, &conv)) {
- return NULL;
- }
- }
- else if (!PyArray_DescrConverter(odescr, &conv)) {
+ conv = _convert_from_any(odescr, align);
+ if (conv == NULL) {
return NULL;
}
@@ -2439,7 +2434,7 @@ arraydescr_reduce(PyArray_Descr *self, PyObject *NPY_UNUSED(args))
}
obj = PyUString_FromFormat("%c%d",self->kind, elsize);
}
- PyTuple_SET_ITEM(ret, 1, Py_BuildValue("(Nii)", obj, 0, 1));
+ PyTuple_SET_ITEM(ret, 1, Py_BuildValue("(NOO)", obj, Py_False, Py_True));
/*
* Now return the state which is at least byteorder,
@@ -2530,7 +2525,7 @@ _descr_find_object(PyArray_Descr *self)
Py_ssize_t pos = 0;
while (PyDict_Next(self->fields, &pos, &key, &value)) {
- if NPY_TITLE_KEY(key, value) {
+ if (NPY_TITLE_KEY(key, value)) {
continue;
}
if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, &title)) {
@@ -2639,8 +2634,12 @@ arraydescr_setstate(PyArray_Descr *self, PyObject *args)
if (fields != Py_None) {
PyObject *key, *list;
key = PyInt_FromLong(-1);
- list = PyDict_GetItem(fields, key);
+ list = PyDict_GetItemWithError(fields, key);
if (!list) {
+ if (!PyErr_Occurred()) {
+ /* fields was missing the name it claimed to contain */
+ PyErr_BadInternalCall();
+ }
return NULL;
}
Py_INCREF(list);
@@ -2733,11 +2732,7 @@ arraydescr_setstate(PyArray_Descr *self, PyObject *args)
subarray_shape = PyTuple_GET_ITEM(subarray, 1);
if (PyNumber_Check(subarray_shape)) {
PyObject *tmp;
-#if defined(NPY_PY3K)
tmp = PyNumber_Long(subarray_shape);
-#else
- tmp = PyNumber_Int(subarray_shape);
-#endif
if (tmp == NULL) {
return NULL;
}
@@ -2792,7 +2787,6 @@ arraydescr_setstate(PyArray_Descr *self, PyObject *args)
}
}
else {
-#if defined(NPY_PY3K)
/*
* To support pickle.load(f, encoding='bytes') for loading Py2
* generated pickles on Py3, we need to be more lenient and convert
@@ -2816,8 +2810,12 @@ arraydescr_setstate(PyArray_Descr *self, PyObject *args)
for (i = 0; i < PyTuple_GET_SIZE(names); ++i) {
name = PyTuple_GET_ITEM(names, i);
- field = PyDict_GetItem(fields, name);
+ field = PyDict_GetItemWithError(fields, name);
if (!field) {
+ if (!PyErr_Occurred()) {
+ /* fields was missing the name it claimed to contain */
+ PyErr_BadInternalCall();
+ }
return NULL;
}
@@ -2837,11 +2835,6 @@ arraydescr_setstate(PyArray_Descr *self, PyObject *args)
return NULL;
}
}
-#else
- PyErr_Format(PyExc_ValueError,
- "non-string names in Numpy dtype unpickling");
- return NULL;
-#endif
}
}
@@ -2930,35 +2923,8 @@ arraydescr_setstate(PyArray_Descr *self, PyObject *args)
NPY_NO_EXPORT int
PyArray_DescrAlignConverter(PyObject *obj, PyArray_Descr **at)
{
- if (PyDict_Check(obj) || PyDictProxy_Check(obj)) {
- *at = _convert_from_dict(obj, 1);
- }
- else if (PyBytes_Check(obj)) {
- *at = _convert_from_commastring(obj, 1);
- }
- else if (PyUnicode_Check(obj)) {
- PyObject *tmp;
- tmp = PyUnicode_AsASCIIString(obj);
- *at = _convert_from_commastring(tmp, 1);
- Py_DECREF(tmp);
- }
- else if (PyTuple_Check(obj)) {
- *at = _convert_from_tuple(obj, 1);
- }
- else if (PyList_Check(obj)) {
- *at = _convert_from_array_descr(obj, 1);
- }
- else {
- return PyArray_DescrConverter(obj, at);
- }
- if (*at == NULL) {
- if (!PyErr_Occurred()) {
- PyErr_SetString(PyExc_ValueError,
- "data-type-descriptor not understood");
- }
- return NPY_FAIL;
- }
- return NPY_SUCCEED;
+ *at = _convert_from_any(obj, 1);
+ return (*at) ? NPY_SUCCEED : NPY_FAIL;
}
/*NUMPY_API
@@ -2969,32 +2935,13 @@ PyArray_DescrAlignConverter(PyObject *obj, PyArray_Descr **at)
NPY_NO_EXPORT int
PyArray_DescrAlignConverter2(PyObject *obj, PyArray_Descr **at)
{
- if (PyDict_Check(obj) || PyDictProxy_Check(obj)) {
- *at = _convert_from_dict(obj, 1);
- }
- else if (PyBytes_Check(obj)) {
- *at = _convert_from_commastring(obj, 1);
- }
- else if (PyUnicode_Check(obj)) {
- PyObject *tmp;
- tmp = PyUnicode_AsASCIIString(obj);
- *at = _convert_from_commastring(tmp, 1);
- Py_DECREF(tmp);
- }
- else if (PyList_Check(obj)) {
- *at = _convert_from_array_descr(obj, 1);
+ if (obj == Py_None) {
+ *at = NULL;
+ return NPY_SUCCEED;
}
else {
- return PyArray_DescrConverter2(obj, at);
+ return PyArray_DescrAlignConverter(obj, at);
}
- if (*at == NULL) {
- if (!PyErr_Occurred()) {
- PyErr_SetString(PyExc_ValueError,
- "data-type-descriptor not understood");
- }
- return NPY_FAIL;
- }
- return NPY_SUCCEED;
}
@@ -3052,7 +2999,7 @@ PyArray_DescrNewByteorder(PyArray_Descr *self, char newendian)
newfields = PyDict_New();
/* make new dictionary with replaced PyArray_Descr Objects */
while (PyDict_Next(self->fields, &pos, &key, &value)) {
- if NPY_TITLE_KEY(key, value) {
+ if (NPY_TITLE_KEY(key, value)) {
continue;
}
if (!PyUString_Check(key) || !PyTuple_Check(value) ||
@@ -3076,8 +3023,13 @@ PyArray_DescrNewByteorder(PyArray_Descr *self, char newendian)
Py_INCREF(old);
PyTuple_SET_ITEM(newvalue, i, old);
}
- PyDict_SetItem(newfields, key, newvalue);
+ int ret = PyDict_SetItem(newfields, key, newvalue);
Py_DECREF(newvalue);
+ if (ret < 0) {
+ Py_DECREF(newfields);
+ Py_DECREF(new);
+ return NULL;
+ }
}
Py_DECREF(new->fields);
new->fields = newfields;
@@ -3214,71 +3166,41 @@ arraydescr_str(PyArray_Descr *dtype)
static PyObject *
arraydescr_richcompare(PyArray_Descr *self, PyObject *other, int cmp_op)
{
- PyArray_Descr *new = NULL;
- PyObject *result = Py_NotImplemented;
- if (!PyArray_DescrCheck(other)) {
- if (PyArray_DescrConverter(other, &new) == NPY_FAIL) {
- return NULL;
- }
- }
- else {
- new = (PyArray_Descr *)other;
- Py_INCREF(new);
+ PyArray_Descr *new = _convert_from_any(other, 0);
+ if (new == NULL) {
+ return NULL;
}
+
+ npy_bool ret;
switch (cmp_op) {
case Py_LT:
- if (!PyArray_EquivTypes(self, new) && PyArray_CanCastTo(self, new)) {
- result = Py_True;
- }
- else {
- result = Py_False;
- }
- break;
+ ret = !PyArray_EquivTypes(self, new) && PyArray_CanCastTo(self, new);
+ Py_DECREF(new);
+ return PyBool_FromLong(ret);
case Py_LE:
- if (PyArray_CanCastTo(self, new)) {
- result = Py_True;
- }
- else {
- result = Py_False;
- }
- break;
+ ret = PyArray_CanCastTo(self, new);
+ Py_DECREF(new);
+ return PyBool_FromLong(ret);
case Py_EQ:
- if (PyArray_EquivTypes(self, new)) {
- result = Py_True;
- }
- else {
- result = Py_False;
- }
- break;
+ ret = PyArray_EquivTypes(self, new);
+ Py_DECREF(new);
+ return PyBool_FromLong(ret);
case Py_NE:
- if (PyArray_EquivTypes(self, new))
- result = Py_False;
- else
- result = Py_True;
- break;
+ ret = !PyArray_EquivTypes(self, new);
+ Py_DECREF(new);
+ return PyBool_FromLong(ret);
case Py_GT:
- if (!PyArray_EquivTypes(self, new) && PyArray_CanCastTo(new, self)) {
- result = Py_True;
- }
- else {
- result = Py_False;
- }
- break;
+ ret = !PyArray_EquivTypes(self, new) && PyArray_CanCastTo(new, self);
+ Py_DECREF(new);
+ return PyBool_FromLong(ret);
case Py_GE:
- if (PyArray_CanCastTo(new, self)) {
- result = Py_True;
- }
- else {
- result = Py_False;
- }
- break;
+ ret = PyArray_CanCastTo(new, self);
+ Py_DECREF(new);
+ return PyBool_FromLong(ret);
default:
- result = Py_NotImplemented;
+ Py_DECREF(new);
+ Py_RETURN_NOTIMPLEMENTED;
}
-
- Py_XDECREF(new);
- Py_INCREF(result);
- return result;
}
static int
@@ -3290,20 +3212,7 @@ descr_nonzero(PyObject *NPY_UNUSED(self))
}
static PyNumberMethods descr_as_number = {
- (binaryfunc)0, /* nb_add */
- (binaryfunc)0, /* nb_subtract */
- (binaryfunc)0, /* nb_multiply */
- #if defined(NPY_PY3K)
- #else
- (binaryfunc)0, /* nb_divide */
- #endif
- (binaryfunc)0, /* nb_remainder */
- (binaryfunc)0, /* nb_divmod */
- (ternaryfunc)0, /* nb_power */
- (unaryfunc)0, /* nb_negative */
- (unaryfunc)0, /* nb_positive */
- (unaryfunc)0, /* nb_absolute */
- (inquiry)descr_nonzero, /* nb_nonzero */
+ .nb_bool = (inquiry)descr_nonzero,
};
/*************************************************************************
@@ -3336,7 +3245,7 @@ descr_repeat(PyObject *self, Py_ssize_t length)
if (tup == NULL) {
return NULL;
}
- PyArray_DescrConverter(tup, &new);
+ new = _convert_from_any(tup, 0);
Py_DECREF(tup);
return (PyObject *)new;
}
@@ -3345,20 +3254,7 @@ static int
_check_has_fields(PyArray_Descr *self)
{
if (!PyDataType_HASFIELDS(self)) {
- PyObject *astr = arraydescr_str(self);
- if (astr == NULL) {
- return -1;
- }
-#if defined(NPY_PY3K)
- {
- PyObject *bstr = PyUnicode_AsUnicodeEscapeString(astr);
- Py_DECREF(astr);
- astr = bstr;
- }
-#endif
- PyErr_Format(PyExc_KeyError,
- "There are no fields in dtype %s.", PyBytes_AsString(astr));
- Py_DECREF(astr);
+ PyErr_Format(PyExc_KeyError, "There are no fields in dtype %S.", self);
return -1;
}
else {
@@ -3369,26 +3265,15 @@ _check_has_fields(PyArray_Descr *self)
static PyObject *
_subscript_by_name(PyArray_Descr *self, PyObject *op)
{
- PyObject *obj = PyDict_GetItem(self->fields, op);
- PyObject *descr;
- PyObject *s;
-
+ PyObject *obj = PyDict_GetItemWithError(self->fields, op);
if (obj == NULL) {
- if (PyUnicode_Check(op)) {
- s = PyUnicode_AsUnicodeEscapeString(op);
- }
- else {
- s = op;
- }
-
- PyErr_Format(PyExc_KeyError,
- "Field named \'%s\' not found.", PyBytes_AsString(s));
- if (s != op) {
- Py_DECREF(s);
+ if (!PyErr_Occurred()) {
+ PyErr_Format(PyExc_KeyError,
+ "Field named %R not found.", op);
}
return NULL;
}
- descr = PyTuple_GET_ITEM(obj, 0);
+ PyObject *descr = PyTuple_GET_ITEM(obj, 0);
Py_INCREF(descr);
return descr;
}
@@ -3462,9 +3347,11 @@ arraydescr_field_subset_view(PyArray_Descr *self, PyObject *ind)
*/
PyTuple_SET_ITEM(names, i, name);
- tup = PyDict_GetItem(self->fields, name);
+ tup = PyDict_GetItemWithError(self->fields, name);
if (tup == NULL) {
- PyErr_SetObject(PyExc_KeyError, name);
+ if (!PyErr_Occurred()) {
+ PyErr_SetObject(PyExc_KeyError, name);
+ }
goto fail;
}
@@ -3570,61 +3457,20 @@ static PyMappingMethods descr_as_mapping = {
/****************** End of Mapping Protocol ******************************/
NPY_NO_EXPORT PyTypeObject PyArrayDescr_Type = {
-#if defined(NPY_PY3K)
PyVarObject_HEAD_INIT(NULL, 0)
-#else
- PyObject_HEAD_INIT(NULL)
- 0, /* ob_size */
-#endif
- "numpy.dtype", /* tp_name */
- sizeof(PyArray_Descr), /* tp_basicsize */
- 0, /* tp_itemsize */
+ .tp_name = "numpy.dtype",
+ .tp_basicsize = sizeof(PyArray_Descr),
/* methods */
- (destructor)arraydescr_dealloc, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
-#if defined(NPY_PY3K)
- (void *)0, /* tp_reserved */
-#else
- 0, /* tp_compare */
-#endif
- (reprfunc)arraydescr_repr, /* tp_repr */
- &descr_as_number, /* tp_as_number */
- &descr_as_sequence, /* tp_as_sequence */
- &descr_as_mapping, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- (reprfunc)arraydescr_str, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
- Py_TPFLAGS_DEFAULT, /* tp_flags */
- 0, /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- (richcmpfunc)arraydescr_richcompare, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
- arraydescr_methods, /* tp_methods */
- arraydescr_members, /* tp_members */
- arraydescr_getsets, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- 0, /* tp_init */
- 0, /* tp_alloc */
- arraydescr_new, /* tp_new */
- 0, /* tp_free */
- 0, /* tp_is_gc */
- 0, /* tp_bases */
- 0, /* tp_mro */
- 0, /* tp_cache */
- 0, /* tp_subclasses */
- 0, /* tp_weaklist */
- 0, /* tp_del */
- 0, /* tp_version_tag */
+ .tp_dealloc = (destructor)arraydescr_dealloc,
+ .tp_repr = (reprfunc)arraydescr_repr,
+ .tp_as_number = &descr_as_number,
+ .tp_as_sequence = &descr_as_sequence,
+ .tp_as_mapping = &descr_as_mapping,
+ .tp_str = (reprfunc)arraydescr_str,
+ .tp_flags = Py_TPFLAGS_DEFAULT,
+ .tp_richcompare = (richcmpfunc)arraydescr_richcompare,
+ .tp_methods = arraydescr_methods,
+ .tp_members = arraydescr_members,
+ .tp_getset = arraydescr_getsets,
+ .tp_new = arraydescr_new,
};
diff --git a/numpy/core/src/multiarray/descriptor.h b/numpy/core/src/multiarray/descriptor.h
index 6024c5e77..fc9e0895b 100644
--- a/numpy/core/src/multiarray/descriptor.h
+++ b/numpy/core/src/multiarray/descriptor.h
@@ -7,8 +7,8 @@ NPY_NO_EXPORT PyObject *arraydescr_protocol_descr_get(PyArray_Descr *self);
NPY_NO_EXPORT PyObject *
array_set_typeDict(PyObject *NPY_UNUSED(ignored), PyObject *args);
-int
-_arraydescr_from_dtype_attr(PyObject *obj, PyArray_Descr **newdescr);
+NPY_NO_EXPORT PyArray_Descr *
+_arraydescr_try_convert_from_dtype_attr(PyObject *obj);
NPY_NO_EXPORT int
@@ -26,6 +26,6 @@ is_dtype_struct_simple_unaligned_layout(PyArray_Descr *dtype);
NPY_NO_EXPORT PyArray_Descr *
arraydescr_field_subset_view(PyArray_Descr *self, PyObject *ind);
-extern NPY_NO_EXPORT char *_datetime_strings[];
+extern NPY_NO_EXPORT char const *_datetime_strings[];
#endif
diff --git a/numpy/core/src/multiarray/dragon4.c b/numpy/core/src/multiarray/dragon4.c
index 1694596e9..282cdad28 100644
--- a/numpy/core/src/multiarray/dragon4.c
+++ b/numpy/core/src/multiarray/dragon4.c
@@ -1565,8 +1565,8 @@ Dragon4(BigInt *bigints, const npy_int32 exponent,
/* Options struct for easy passing of Dragon4 options.
*
* scientific - boolean controlling whether scientific notation is used
- * digit_mode - whether to use unique or fixed fracional output
- * cutoff_mode - whether 'precision' refers to toal digits, or digits past
+ * digit_mode - whether to use unique or fixed fractional output
+ * cutoff_mode - whether 'precision' refers to total digits, or digits past
* the decimal point.
* precision - When negative, prints as many digits as needed for a unique
* number. When positive specifies the maximum number of
@@ -3183,19 +3183,19 @@ Dragon4_Positional(PyObject *obj, DigitMode digit_mode, CutoffMode cutoff_mode,
opt.exp_digits = -1;
if (PyArray_IsScalar(obj, Half)) {
- npy_half x = ((PyHalfScalarObject *)obj)->obval;
+ npy_half x = PyArrayScalar_VAL(obj, Half);
return Dragon4_Positional_Half_opt(&x, &opt);
}
else if (PyArray_IsScalar(obj, Float)) {
- npy_float x = ((PyFloatScalarObject *)obj)->obval;
+ npy_float x = PyArrayScalar_VAL(obj, Float);
return Dragon4_Positional_Float_opt(&x, &opt);
}
else if (PyArray_IsScalar(obj, Double)) {
- npy_double x = ((PyDoubleScalarObject *)obj)->obval;
+ npy_double x = PyArrayScalar_VAL(obj, Double);
return Dragon4_Positional_Double_opt(&x, &opt);
}
else if (PyArray_IsScalar(obj, LongDouble)) {
- npy_longdouble x = ((PyLongDoubleScalarObject *)obj)->obval;
+ npy_longdouble x = PyArrayScalar_VAL(obj, LongDouble);
return Dragon4_Positional_LongDouble_opt(&x, &opt);
}
@@ -3224,19 +3224,19 @@ Dragon4_Scientific(PyObject *obj, DigitMode digit_mode, int precision,
opt.exp_digits = exp_digits;
if (PyArray_IsScalar(obj, Half)) {
- npy_half x = ((PyHalfScalarObject *)obj)->obval;
+ npy_half x = PyArrayScalar_VAL(obj, Half);
return Dragon4_Scientific_Half_opt(&x, &opt);
}
else if (PyArray_IsScalar(obj, Float)) {
- npy_float x = ((PyFloatScalarObject *)obj)->obval;
+ npy_float x = PyArrayScalar_VAL(obj, Float);
return Dragon4_Scientific_Float_opt(&x, &opt);
}
else if (PyArray_IsScalar(obj, Double)) {
- npy_double x = ((PyDoubleScalarObject *)obj)->obval;
+ npy_double x = PyArrayScalar_VAL(obj, Double);
return Dragon4_Scientific_Double_opt(&x, &opt);
}
else if (PyArray_IsScalar(obj, LongDouble)) {
- npy_longdouble x = ((PyLongDoubleScalarObject *)obj)->obval;
+ npy_longdouble x = PyArrayScalar_VAL(obj, LongDouble);
return Dragon4_Scientific_LongDouble_opt(&x, &opt);
}
diff --git a/numpy/core/src/multiarray/dtype_transfer.c b/numpy/core/src/multiarray/dtype_transfer.c
index ef0dd4a01..ecaa680ec 100644
--- a/numpy/core/src/multiarray/dtype_transfer.c
+++ b/numpy/core/src/multiarray/dtype_transfer.c
@@ -312,6 +312,7 @@ typedef struct {
NpyAuxData *wrappeddata, *todata, *fromdata;
npy_intp src_itemsize, dst_itemsize;
char *bufferin, *bufferout;
+ npy_bool init_dest, out_needs_api;
} _align_wrap_data;
/* transfer data free function */
@@ -372,6 +373,9 @@ static NpyAuxData *_align_wrap_data_clone(NpyAuxData *data)
}
}
+ newdata->init_dest = d->init_dest;
+ newdata->out_needs_api = d->out_needs_api;
+
return (NpyAuxData *)newdata;
}
@@ -391,57 +395,26 @@ _strided_to_strided_contig_align_wrap(char *dst, npy_intp dst_stride,
*todata = d->todata,
*fromdata = d->fromdata;
char *bufferin = d->bufferin, *bufferout = d->bufferout;
+ npy_bool init_dest = d->init_dest, out_needs_api = d->out_needs_api;
for(;;) {
- if (N > NPY_LOWLEVEL_BUFFER_BLOCKSIZE) {
- tobuffer(bufferin, inner_src_itemsize, src, src_stride,
- NPY_LOWLEVEL_BUFFER_BLOCKSIZE,
- src_itemsize, todata);
- wrapped(bufferout, dst_itemsize, bufferin, inner_src_itemsize,
- NPY_LOWLEVEL_BUFFER_BLOCKSIZE,
- inner_src_itemsize, wrappeddata);
- frombuffer(dst, dst_stride, bufferout, dst_itemsize,
- NPY_LOWLEVEL_BUFFER_BLOCKSIZE,
- dst_itemsize, fromdata);
- N -= NPY_LOWLEVEL_BUFFER_BLOCKSIZE;
- src += NPY_LOWLEVEL_BUFFER_BLOCKSIZE*src_stride;
- dst += NPY_LOWLEVEL_BUFFER_BLOCKSIZE*dst_stride;
- }
- else {
- tobuffer(bufferin, inner_src_itemsize, src, src_stride, N,
- src_itemsize, todata);
- wrapped(bufferout, dst_itemsize, bufferin, inner_src_itemsize, N,
- inner_src_itemsize, wrappeddata);
- frombuffer(dst, dst_stride, bufferout, dst_itemsize, N,
- dst_itemsize, fromdata);
+ /*
+ * The caller does not know if a previous call resulted in a Python
+ * exception. Much of the Python API is unsafe while an exception is in
+ * flight, so just skip all the work. Someone higher in the call stack
+ * will check for errors and propagate them.
+ */
+ if (out_needs_api && PyErr_Occurred()) {
return;
}
- }
-}
-
-static void
-_strided_to_strided_contig_align_wrap_init_dest(char *dst, npy_intp dst_stride,
- char *src, npy_intp src_stride,
- npy_intp N, npy_intp src_itemsize,
- NpyAuxData *data)
-{
- _align_wrap_data *d = (_align_wrap_data *)data;
- PyArray_StridedUnaryOp *wrapped = d->wrapped,
- *tobuffer = d->tobuffer,
- *frombuffer = d->frombuffer;
- npy_intp inner_src_itemsize = d->src_itemsize,
- dst_itemsize = d->dst_itemsize;
- NpyAuxData *wrappeddata = d->wrappeddata,
- *todata = d->todata,
- *fromdata = d->fromdata;
- char *bufferin = d->bufferin, *bufferout = d->bufferout;
-
- for(;;) {
if (N > NPY_LOWLEVEL_BUFFER_BLOCKSIZE) {
tobuffer(bufferin, inner_src_itemsize, src, src_stride,
NPY_LOWLEVEL_BUFFER_BLOCKSIZE,
src_itemsize, todata);
- memset(bufferout, 0, dst_itemsize*NPY_LOWLEVEL_BUFFER_BLOCKSIZE);
+ if (init_dest) {
+ memset(bufferout, 0,
+ dst_itemsize*NPY_LOWLEVEL_BUFFER_BLOCKSIZE);
+ }
wrapped(bufferout, dst_itemsize, bufferin, inner_src_itemsize,
NPY_LOWLEVEL_BUFFER_BLOCKSIZE,
inner_src_itemsize, wrappeddata);
@@ -455,7 +428,9 @@ _strided_to_strided_contig_align_wrap_init_dest(char *dst, npy_intp dst_stride,
else {
tobuffer(bufferin, inner_src_itemsize, src, src_stride, N,
src_itemsize, todata);
- memset(bufferout, 0, dst_itemsize*N);
+ if (init_dest) {
+ memset(bufferout, 0, dst_itemsize*N);
+ }
wrapped(bufferout, dst_itemsize, bufferin, inner_src_itemsize, N,
inner_src_itemsize, wrappeddata);
frombuffer(dst, dst_stride, bufferout, dst_itemsize, N,
@@ -477,6 +452,7 @@ _strided_to_strided_contig_align_wrap_init_dest(char *dst, npy_intp dst_stride,
* wrapped - contig to contig transfer function being wrapped
* wrappeddata - data for wrapped
* init_dest - 1 means to memset the dest buffer to 0 before calling wrapped.
+ * out_needs_api - if NPY_TRUE, check for (and break on) Python API errors.
*
* Returns NPY_SUCCEED or NPY_FAIL.
*/
@@ -487,6 +463,7 @@ wrap_aligned_contig_transfer_function(
PyArray_StridedUnaryOp *frombuffer, NpyAuxData *fromdata,
PyArray_StridedUnaryOp *wrapped, NpyAuxData *wrappeddata,
int init_dest,
+ int out_needs_api,
PyArray_StridedUnaryOp **out_stransfer,
NpyAuxData **out_transferdata)
{
@@ -519,14 +496,11 @@ wrap_aligned_contig_transfer_function(
data->bufferin = (char *)data + basedatasize;
data->bufferout = data->bufferin +
NPY_LOWLEVEL_BUFFER_BLOCKSIZE*src_itemsize;
+ data->init_dest = (npy_bool) init_dest;
+ data->out_needs_api = (npy_bool) out_needs_api;
/* Set the function and data */
- if (init_dest) {
- *out_stransfer = &_strided_to_strided_contig_align_wrap_init_dest;
- }
- else {
- *out_stransfer = &_strided_to_strided_contig_align_wrap;
- }
+ *out_stransfer = &_strided_to_strided_contig_align_wrap;
*out_transferdata = (NpyAuxData *)data;
return NPY_SUCCEED;
@@ -1171,6 +1145,7 @@ get_datetime_to_unicode_transfer_function(int aligned,
frombuffer, fromdata,
caststransfer, castdata,
PyDataType_FLAGCHK(str_dtype, NPY_NEEDS_INIT),
+ *out_needs_api,
out_stransfer, out_transferdata) != NPY_SUCCEED) {
NPY_AUXDATA_FREE(castdata);
NPY_AUXDATA_FREE(todata);
@@ -1293,6 +1268,7 @@ get_unicode_to_datetime_transfer_function(int aligned,
frombuffer, fromdata,
caststransfer, castdata,
PyDataType_FLAGCHK(dst_dtype, NPY_NEEDS_INIT),
+ *out_needs_api,
out_stransfer, out_transferdata) != NPY_SUCCEED) {
Py_DECREF(str_dtype);
NPY_AUXDATA_FREE(castdata);
@@ -1613,6 +1589,7 @@ get_cast_transfer_function(int aligned,
frombuffer, fromdata,
caststransfer, castdata,
PyDataType_FLAGCHK(dst_dtype, NPY_NEEDS_INIT),
+ *out_needs_api,
out_stransfer, out_transferdata) != NPY_SUCCEED) {
NPY_AUXDATA_FREE(castdata);
NPY_AUXDATA_FREE(todata);
@@ -3832,8 +3809,8 @@ PyArray_CastRawArrays(npy_intp count,
* Returns 0 on success, -1 on failure.
*/
NPY_NO_EXPORT int
-PyArray_PrepareOneRawArrayIter(int ndim, npy_intp *shape,
- char *data, npy_intp *strides,
+PyArray_PrepareOneRawArrayIter(int ndim, npy_intp const *shape,
+ char *data, npy_intp const *strides,
int *out_ndim, npy_intp *out_shape,
char **out_data, npy_intp *out_strides)
{
@@ -3953,9 +3930,9 @@ PyArray_PrepareOneRawArrayIter(int ndim, npy_intp *shape,
* Returns 0 on success, -1 on failure.
*/
NPY_NO_EXPORT int
-PyArray_PrepareTwoRawArrayIter(int ndim, npy_intp *shape,
- char *dataA, npy_intp *stridesA,
- char *dataB, npy_intp *stridesB,
+PyArray_PrepareTwoRawArrayIter(int ndim, npy_intp const *shape,
+ char *dataA, npy_intp const *stridesA,
+ char *dataB, npy_intp const *stridesB,
int *out_ndim, npy_intp *out_shape,
char **out_dataA, npy_intp *out_stridesA,
char **out_dataB, npy_intp *out_stridesB)
@@ -4077,10 +4054,10 @@ PyArray_PrepareTwoRawArrayIter(int ndim, npy_intp *shape,
* Returns 0 on success, -1 on failure.
*/
NPY_NO_EXPORT int
-PyArray_PrepareThreeRawArrayIter(int ndim, npy_intp *shape,
- char *dataA, npy_intp *stridesA,
- char *dataB, npy_intp *stridesB,
- char *dataC, npy_intp *stridesC,
+PyArray_PrepareThreeRawArrayIter(int ndim, npy_intp const *shape,
+ char *dataA, npy_intp const *stridesA,
+ char *dataB, npy_intp const *stridesB,
+ char *dataC, npy_intp const *stridesC,
int *out_ndim, npy_intp *out_shape,
char **out_dataA, npy_intp *out_stridesA,
char **out_dataB, npy_intp *out_stridesB,
diff --git a/numpy/core/src/multiarray/einsum.c.src b/numpy/core/src/multiarray/einsum.c.src
index e7bbc3d0b..1cc557825 100644
--- a/numpy/core/src/multiarray/einsum.c.src
+++ b/numpy/core/src/multiarray/einsum.c.src
@@ -107,7 +107,7 @@
*/
static void
@name@_sum_of_products_@noplabel@(int nop, char **dataptr,
- npy_intp *strides, npy_intp count)
+ npy_intp const *strides, npy_intp count)
{
#if (@nop@ == 1) || (@nop@ <= 3 && !@complex@)
char *data0 = dataptr[0];
@@ -206,7 +206,7 @@ static void
static void
@name@_sum_of_products_contig_one(int nop, char **dataptr,
- npy_intp *NPY_UNUSED(strides), npy_intp count)
+ npy_intp const *NPY_UNUSED(strides), npy_intp count)
{
@type@ *data0 = (@type@ *)dataptr[0];
@type@ *data_out = (@type@ *)dataptr[1];
@@ -268,7 +268,7 @@ finish_after_unrolled_loop:
static void
@name@_sum_of_products_contig_two(int nop, char **dataptr,
- npy_intp *NPY_UNUSED(strides), npy_intp count)
+ npy_intp const *NPY_UNUSED(strides), npy_intp count)
{
@type@ *data0 = (@type@ *)dataptr[0];
@type@ *data1 = (@type@ *)dataptr[1];
@@ -354,7 +354,7 @@ finish_after_unrolled_loop:
/* Some extra specializations for the two operand case */
static void
@name@_sum_of_products_stride0_contig_outcontig_two(int nop, char **dataptr,
- npy_intp *NPY_UNUSED(strides), npy_intp count)
+ npy_intp const *NPY_UNUSED(strides), npy_intp count)
{
@temptype@ value0 = @from@(*(@type@ *)dataptr[0]);
@type@ *data1 = (@type@ *)dataptr[1];
@@ -483,7 +483,7 @@ finish_after_unrolled_loop:
static void
@name@_sum_of_products_contig_stride0_outcontig_two(int nop, char **dataptr,
- npy_intp *NPY_UNUSED(strides), npy_intp count)
+ npy_intp const *NPY_UNUSED(strides), npy_intp count)
{
@type@ *data0 = (@type@ *)dataptr[0];
@temptype@ value1 = @from@(*(@type@ *)dataptr[1]);
@@ -567,7 +567,7 @@ finish_after_unrolled_loop:
static void
@name@_sum_of_products_contig_contig_outstride0_two(int nop, char **dataptr,
- npy_intp *NPY_UNUSED(strides), npy_intp count)
+ npy_intp const *NPY_UNUSED(strides), npy_intp count)
{
@type@ *data0 = (@type@ *)dataptr[0];
@type@ *data1 = (@type@ *)dataptr[1];
@@ -727,7 +727,7 @@ finish_after_unrolled_loop:
static void
@name@_sum_of_products_stride0_contig_outstride0_two(int nop, char **dataptr,
- npy_intp *NPY_UNUSED(strides), npy_intp count)
+ npy_intp const *NPY_UNUSED(strides), npy_intp count)
{
@temptype@ value0 = @from@(*(@type@ *)dataptr[0]);
@type@ *data1 = (@type@ *)dataptr[1];
@@ -826,7 +826,7 @@ finish_after_unrolled_loop:
static void
@name@_sum_of_products_contig_stride0_outstride0_two(int nop, char **dataptr,
- npy_intp *NPY_UNUSED(strides), npy_intp count)
+ npy_intp const *NPY_UNUSED(strides), npy_intp count)
{
@type@ *data0 = (@type@ *)dataptr[0];
@temptype@ value1 = @from@(*(@type@ *)dataptr[1]);
@@ -927,7 +927,7 @@ finish_after_unrolled_loop:
static void
@name@_sum_of_products_contig_three(int nop, char **dataptr,
- npy_intp *NPY_UNUSED(strides), npy_intp count)
+ npy_intp const *NPY_UNUSED(strides), npy_intp count)
{
@type@ *data0 = (@type@ *)dataptr[0];
@type@ *data1 = (@type@ *)dataptr[1];
@@ -971,7 +971,7 @@ static void
static void
@name@_sum_of_products_contig_@noplabel@(int nop, char **dataptr,
- npy_intp *NPY_UNUSED(strides), npy_intp count)
+ npy_intp const *NPY_UNUSED(strides), npy_intp count)
{
NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_contig_@noplabel@ (%d)\n",
(int)count);
@@ -1024,7 +1024,7 @@ static void
static void
@name@_sum_of_products_contig_outstride0_one(int nop, char **dataptr,
- npy_intp *strides, npy_intp count)
+ npy_intp const *strides, npy_intp count)
{
#if @complex@
@temptype@ accum_re = 0, accum_im = 0;
@@ -1201,7 +1201,7 @@ finish_after_unrolled_loop:
static void
@name@_sum_of_products_outstride0_@noplabel@(int nop, char **dataptr,
- npy_intp *strides, npy_intp count)
+ npy_intp const *strides, npy_intp count)
{
#if @complex@
@temptype@ accum_re = 0, accum_im = 0;
@@ -1319,7 +1319,7 @@ static void
static void
bool_sum_of_products_@noplabel@(int nop, char **dataptr,
- npy_intp *strides, npy_intp count)
+ npy_intp const *strides, npy_intp count)
{
#if (@nop@ <= 3)
char *data0 = dataptr[0];
@@ -1376,7 +1376,7 @@ bool_sum_of_products_@noplabel@(int nop, char **dataptr,
static void
bool_sum_of_products_contig_@noplabel@(int nop, char **dataptr,
- npy_intp *strides, npy_intp count)
+ npy_intp const *strides, npy_intp count)
{
#if (@nop@ <= 3)
char *data0 = dataptr[0];
@@ -1484,7 +1484,7 @@ finish_after_unrolled_loop:
static void
bool_sum_of_products_outstride0_@noplabel@(int nop, char **dataptr,
- npy_intp *strides, npy_intp count)
+ npy_intp const *strides, npy_intp count)
{
npy_bool accum = 0;
@@ -1538,7 +1538,7 @@ bool_sum_of_products_outstride0_@noplabel@(int nop, char **dataptr,
/**end repeat**/
-typedef void (*sum_of_products_fn)(int, char **, npy_intp *, npy_intp);
+typedef void (*sum_of_products_fn)(int, char **, npy_intp const*, npy_intp);
/* These tables need to match up with the type enum */
static sum_of_products_fn
@@ -1720,7 +1720,7 @@ static sum_of_products_fn _unspecialized_table[NPY_NTYPES][4] = {
static sum_of_products_fn
get_sum_of_products_function(int nop, int type_num,
- npy_intp itemsize, npy_intp *fixed_strides)
+ npy_intp itemsize, npy_intp const *fixed_strides)
{
int iop;
@@ -1876,7 +1876,7 @@ parse_operand_subscripts(char *subscripts, int length,
* later where it matters the char is cast to a signed char.
*/
for (idim = 0; idim < ndim - 1; ++idim) {
- int label = op_labels[idim];
+ int label = (signed char)op_labels[idim];
/* If it is a proper label, find any duplicates of it. */
if (label > 0) {
/* Search for the next matching label. */
@@ -2152,6 +2152,11 @@ get_combined_dims_view(PyArrayObject *op, int iop, char *labels)
}
/* A repeated label, find the original one and merge them. */
else {
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wuninitialized"
+#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
+#endif
int i = icombinemap[idim + label];
icombinemap[idim] = -1;
@@ -2164,6 +2169,9 @@ get_combined_dims_view(PyArrayObject *op, int iop, char *labels)
return NULL;
}
new_strides[i] += stride;
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
}
}
diff --git a/numpy/core/src/multiarray/flagsobject.c b/numpy/core/src/multiarray/flagsobject.c
index a66b9d40d..d5f24e75a 100644
--- a/numpy/core/src/multiarray/flagsobject.c
+++ b/numpy/core/src/multiarray/flagsobject.c
@@ -727,47 +727,25 @@ arrayflags_print(PyArrayFlagsObject *self)
);
}
-static int
-arrayflags_compare(PyArrayFlagsObject *self, PyArrayFlagsObject *other)
-{
- if (self->flags == other->flags) {
- return 0;
- }
- else if (self->flags < other->flags) {
- return -1;
- }
- else {
- return 1;
- }
-}
-
-
static PyObject*
arrayflags_richcompare(PyObject *self, PyObject *other, int cmp_op)
{
- PyObject *result = Py_NotImplemented;
- int cmp;
-
- if (cmp_op != Py_EQ && cmp_op != Py_NE) {
- PyErr_SetString(PyExc_TypeError,
- "undefined comparison for flag object");
- return NULL;
+ if (!PyObject_TypeCheck(other, &PyArrayFlags_Type)) {
+ Py_RETURN_NOTIMPLEMENTED;
}
- if (PyObject_TypeCheck(other, &PyArrayFlags_Type)) {
- cmp = arrayflags_compare((PyArrayFlagsObject *)self,
- (PyArrayFlagsObject *)other);
+ npy_bool eq = ((PyArrayFlagsObject*) self)->flags ==
+ ((PyArrayFlagsObject*) other)->flags;
- if (cmp_op == Py_EQ) {
- result = (cmp == 0) ? Py_True : Py_False;
- }
- else if (cmp_op == Py_NE) {
- result = (cmp != 0) ? Py_True : Py_False;
- }
+ if (cmp_op == Py_EQ) {
+ return PyBool_FromLong(eq);
+ }
+ else if (cmp_op == Py_NE) {
+ return PyBool_FromLong(!eq);
+ }
+ else {
+ Py_RETURN_NOTIMPLEMENTED;
}
-
- Py_INCREF(result);
- return result;
}
static PyMappingMethods arrayflags_as_mapping = {
@@ -793,61 +771,15 @@ arrayflags_new(PyTypeObject *NPY_UNUSED(self), PyObject *args, PyObject *NPY_UNU
}
NPY_NO_EXPORT PyTypeObject PyArrayFlags_Type = {
-#if defined(NPY_PY3K)
PyVarObject_HEAD_INIT(NULL, 0)
-#else
- PyObject_HEAD_INIT(NULL)
- 0, /* ob_size */
-#endif
- "numpy.flagsobj",
- sizeof(PyArrayFlagsObject),
- 0, /* tp_itemsize */
- /* methods */
- (destructor)arrayflags_dealloc, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
-#if defined(NPY_PY3K)
- 0, /* tp_reserved */
-#else
- (cmpfunc)arrayflags_compare, /* tp_compare */
-#endif
- (reprfunc)arrayflags_print, /* tp_repr */
- 0, /* tp_as_number */
- 0, /* tp_as_sequence */
- &arrayflags_as_mapping, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- (reprfunc)arrayflags_print, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
- Py_TPFLAGS_DEFAULT, /* tp_flags */
- 0, /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- arrayflags_richcompare, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
- 0, /* tp_methods */
- 0, /* tp_members */
- arrayflags_getsets, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- 0, /* tp_init */
- 0, /* tp_alloc */
- arrayflags_new, /* tp_new */
- 0, /* tp_free */
- 0, /* tp_is_gc */
- 0, /* tp_bases */
- 0, /* tp_mro */
- 0, /* tp_cache */
- 0, /* tp_subclasses */
- 0, /* tp_weaklist */
- 0, /* tp_del */
- 0, /* tp_version_tag */
+ .tp_name = "numpy.flagsobj",
+ .tp_basicsize = sizeof(PyArrayFlagsObject),
+ .tp_dealloc = (destructor)arrayflags_dealloc,
+ .tp_repr = (reprfunc)arrayflags_print,
+ .tp_as_mapping = &arrayflags_as_mapping,
+ .tp_str = (reprfunc)arrayflags_print,
+    .tp_flags = Py_TPFLAGS_DEFAULT,
+ .tp_richcompare = arrayflags_richcompare,
+ .tp_getset = arrayflags_getsets,
+ .tp_new = arrayflags_new,
};
diff --git a/numpy/core/src/multiarray/getset.c b/numpy/core/src/multiarray/getset.c
index 116e37ce5..80a1cd4a1 100644
--- a/numpy/core/src/multiarray/getset.c
+++ b/numpy/core/src/multiarray/getset.c
@@ -13,6 +13,7 @@
#include "npy_import.h"
#include "common.h"
+#include "conversion_utils.h"
#include "ctors.h"
#include "scalartypes.h"
#include "descriptor.h"
@@ -20,7 +21,7 @@
#include "arrayobject.h"
#include "mem_overlap.h"
#include "alloc.h"
-#include "buffer.h"
+#include "npy_buffer.h"
/******************* array attribute get and set routines ******************/
@@ -62,33 +63,39 @@ array_shape_set(PyArrayObject *self, PyObject *val)
if (PyArray_DATA(ret) != PyArray_DATA(self)) {
Py_DECREF(ret);
PyErr_SetString(PyExc_AttributeError,
- "incompatible shape for a non-contiguous "\
- "array");
+ "Incompatible shape for in-place modification. Use "
+ "`.reshape()` to make a copy with the desired shape.");
return -1;
}
- /* Free old dimensions and strides */
- npy_free_cache_dim_array(self);
nd = PyArray_NDIM(ret);
- ((PyArrayObject_fields *)self)->nd = nd;
if (nd > 0) {
/* create new dimensions and strides */
- ((PyArrayObject_fields *)self)->dimensions = npy_alloc_cache_dim(2 * nd);
- if (PyArray_DIMS(self) == NULL) {
+ npy_intp *_dimensions = npy_alloc_cache_dim(2 * nd);
+ if (_dimensions == NULL) {
Py_DECREF(ret);
- PyErr_SetString(PyExc_MemoryError,"");
+ PyErr_NoMemory();
return -1;
}
- ((PyArrayObject_fields *)self)->strides = PyArray_DIMS(self) + nd;
+ /* Free old dimensions and strides */
+ npy_free_cache_dim_array(self);
+ ((PyArrayObject_fields *)self)->nd = nd;
+ ((PyArrayObject_fields *)self)->dimensions = _dimensions;
+ ((PyArrayObject_fields *)self)->strides = _dimensions + nd;
+
if (nd) {
memcpy(PyArray_DIMS(self), PyArray_DIMS(ret), nd*sizeof(npy_intp));
memcpy(PyArray_STRIDES(self), PyArray_STRIDES(ret), nd*sizeof(npy_intp));
}
}
else {
+ /* Free old dimensions and strides */
+ npy_free_cache_dim_array(self);
+ ((PyArrayObject_fields *)self)->nd = 0;
((PyArrayObject_fields *)self)->dimensions = NULL;
((PyArrayObject_fields *)self)->strides = NULL;
}
+
Py_DECREF(ret);
PyArray_UpdateFlags(self, NPY_ARRAY_C_CONTIGUOUS | NPY_ARRAY_F_CONTIGUOUS);
return 0;
@@ -104,26 +111,21 @@ array_strides_get(PyArrayObject *self)
static int
array_strides_set(PyArrayObject *self, PyObject *obj)
{
- PyArray_Dims newstrides = {NULL, 0};
+ PyArray_Dims newstrides = {NULL, -1};
PyArrayObject *new;
npy_intp numbytes = 0;
npy_intp offset = 0;
npy_intp lower_offset = 0;
npy_intp upper_offset = 0;
-#if defined(NPY_PY3K)
Py_buffer view;
-#else
- Py_ssize_t buf_len;
- char *buf;
-#endif
if (obj == NULL) {
PyErr_SetString(PyExc_AttributeError,
"Cannot delete array strides");
return -1;
}
- if (!PyArray_IntpConverter(obj, &newstrides) ||
- newstrides.ptr == NULL) {
+ if (!PyArray_OptionalIntpConverter(obj, &newstrides) ||
+ newstrides.len == -1) {
PyErr_SetString(PyExc_TypeError, "invalid strides");
return -1;
}
@@ -140,7 +142,6 @@ array_strides_set(PyArrayObject *self, PyObject *obj)
* Get the available memory through the buffer interface on
* PyArray_BASE(new) or if that fails from the current new
*/
-#if defined(NPY_PY3K)
if (PyArray_BASE(new) &&
PyObject_GetBuffer(PyArray_BASE(new), &view, PyBUF_SIMPLE) >= 0) {
offset = PyArray_BYTES(self) - (char *)view.buf;
@@ -148,14 +149,6 @@ array_strides_set(PyArrayObject *self, PyObject *obj)
PyBuffer_Release(&view);
_dealloc_cached_buffer_info((PyObject*)new);
}
-#else
- if (PyArray_BASE(new) &&
- PyObject_AsReadBuffer(PyArray_BASE(new), (const void **)&buf,
- &buf_len) >= 0) {
- offset = PyArray_BYTES(self) - buf;
- numbytes = buf_len + offset;
- }
-#endif
else {
PyErr_Clear();
offset_bounds_from_strides(PyArray_ITEMSIZE(new), PyArray_NDIM(new),
@@ -286,31 +279,56 @@ array_interface_get(PyArrayObject *self)
Py_DECREF(dict);
return NULL;
}
+ int ret;
/* dataptr */
obj = array_dataptr_get(self);
- PyDict_SetItemString(dict, "data", obj);
+ ret = PyDict_SetItemString(dict, "data", obj);
Py_DECREF(obj);
+ if (ret < 0) {
+ Py_DECREF(dict);
+ return NULL;
+ }
obj = array_protocol_strides_get(self);
- PyDict_SetItemString(dict, "strides", obj);
+ ret = PyDict_SetItemString(dict, "strides", obj);
Py_DECREF(obj);
+ if (ret < 0) {
+ Py_DECREF(dict);
+ return NULL;
+ }
obj = array_protocol_descr_get(self);
- PyDict_SetItemString(dict, "descr", obj);
+ ret = PyDict_SetItemString(dict, "descr", obj);
Py_DECREF(obj);
+ if (ret < 0) {
+ Py_DECREF(dict);
+ return NULL;
+ }
obj = arraydescr_protocol_typestr_get(PyArray_DESCR(self));
- PyDict_SetItemString(dict, "typestr", obj);
+ ret = PyDict_SetItemString(dict, "typestr", obj);
Py_DECREF(obj);
+ if (ret < 0) {
+ Py_DECREF(dict);
+ return NULL;
+ }
obj = array_shape_get(self);
- PyDict_SetItemString(dict, "shape", obj);
+ ret = PyDict_SetItemString(dict, "shape", obj);
Py_DECREF(obj);
+ if (ret < 0) {
+ Py_DECREF(dict);
+ return NULL;
+ }
obj = PyInt_FromLong(3);
- PyDict_SetItemString(dict, "version", obj);
+ ret = PyDict_SetItemString(dict, "version", obj);
Py_DECREF(obj);
+ if (ret < 0) {
+ Py_DECREF(dict);
+ return NULL;
+ }
return dict;
}
@@ -318,23 +336,7 @@ array_interface_get(PyArrayObject *self)
static PyObject *
array_data_get(PyArrayObject *self)
{
-#if defined(NPY_PY3K)
return PyMemoryView_FromObject((PyObject *)self);
-#else
- npy_intp nbytes;
- if (!(PyArray_ISONESEGMENT(self))) {
- PyErr_SetString(PyExc_AttributeError, "cannot get single-"\
- "segment buffer for discontiguous array");
- return NULL;
- }
- nbytes = PyArray_NBYTES(self);
- if (PyArray_ISWRITEABLE(self)) {
- return PyBuffer_FromReadWriteObject((PyObject *)self, 0, (Py_ssize_t) nbytes);
- }
- else {
- return PyBuffer_FromObject((PyObject *)self, 0, (Py_ssize_t) nbytes);
- }
-#endif
}
static int
@@ -343,9 +345,7 @@ array_data_set(PyArrayObject *self, PyObject *op)
void *buf;
Py_ssize_t buf_len;
int writeable=1;
-#if defined(NPY_PY3K)
Py_buffer view;
-#endif
/* 2016-19-02, 1.12 */
int ret = DEPRECATE("Assigning the 'data' attribute is an "
@@ -360,7 +360,6 @@ array_data_set(PyArrayObject *self, PyObject *op)
"Cannot delete array data");
return -1;
}
-#if defined(NPY_PY3K)
if (PyObject_GetBuffer(op, &view, PyBUF_WRITABLE|PyBUF_SIMPLE) < 0) {
writeable = 0;
PyErr_Clear();
@@ -378,18 +377,7 @@ array_data_set(PyArrayObject *self, PyObject *op)
*/
PyBuffer_Release(&view);
_dealloc_cached_buffer_info(op);
-#else
- if (PyObject_AsWriteBuffer(op, &buf, &buf_len) < 0) {
- PyErr_Clear();
- writeable = 0;
- if (PyObject_AsReadBuffer(op, (const void **)&buf, &buf_len) < 0) {
- PyErr_Clear();
- PyErr_SetString(PyExc_AttributeError,
- "object does not have single-segment buffer interface");
- return -1;
- }
- }
-#endif
+
if (!PyArray_ISONESEGMENT(self)) {
PyErr_SetString(PyExc_AttributeError,
"cannot set single-segment buffer for discontiguous array");
diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c
index a6ac902d3..f0ef8ba3b 100644
--- a/numpy/core/src/multiarray/item_selection.c
+++ b/numpy/core/src/multiarray/item_selection.c
@@ -21,11 +21,168 @@
#include "lowlevel_strided_loops.h"
#include "array_assign.h"
-#include "item_selection.h"
#include "npy_sort.h"
#include "npy_partition.h"
#include "npy_binsearch.h"
#include "alloc.h"
+#include "arraytypes.h"
+
+
+
+static NPY_GCC_OPT_3 NPY_INLINE int
+npy_fasttake_impl(
+ char *dest, char *src, const npy_intp *indices,
+ npy_intp n, npy_intp m, npy_intp max_item,
+ npy_intp nelem, npy_intp chunk,
+ NPY_CLIPMODE clipmode, npy_intp itemsize, int needs_refcounting,
+ PyArray_Descr *dtype, int axis)
+{
+ NPY_BEGIN_THREADS_DEF;
+ NPY_BEGIN_THREADS_DESCR(dtype);
+ switch (clipmode) {
+ case NPY_RAISE:
+ for (npy_intp i = 0; i < n; i++) {
+ for (npy_intp j = 0; j < m; j++) {
+ npy_intp tmp = indices[j];
+ if (check_and_adjust_index(&tmp, max_item, axis,
+ _save) < 0) {
+ return -1;
+ }
+ char *tmp_src = src + tmp * chunk;
+ if (needs_refcounting) {
+ for (npy_intp k = 0; k < nelem; k++) {
+ PyArray_Item_INCREF(tmp_src, dtype);
+ PyArray_Item_XDECREF(dest, dtype);
+ memmove(dest, tmp_src, itemsize);
+ dest += itemsize;
+ tmp_src += itemsize;
+ }
+ }
+ else {
+ memmove(dest, tmp_src, chunk);
+ dest += chunk;
+ }
+ }
+ src += chunk*max_item;
+ }
+ break;
+ case NPY_WRAP:
+ for (npy_intp i = 0; i < n; i++) {
+ for (npy_intp j = 0; j < m; j++) {
+ npy_intp tmp = indices[j];
+ if (tmp < 0) {
+ while (tmp < 0) {
+ tmp += max_item;
+ }
+ }
+ else if (tmp >= max_item) {
+ while (tmp >= max_item) {
+ tmp -= max_item;
+ }
+ }
+ char *tmp_src = src + tmp * chunk;
+ if (needs_refcounting) {
+ for (npy_intp k = 0; k < nelem; k++) {
+ PyArray_Item_INCREF(tmp_src, dtype);
+ PyArray_Item_XDECREF(dest, dtype);
+ memmove(dest, tmp_src, itemsize);
+ dest += itemsize;
+ tmp_src += itemsize;
+ }
+ }
+ else {
+ memmove(dest, tmp_src, chunk);
+ dest += chunk;
+ }
+ }
+ src += chunk*max_item;
+ }
+ break;
+ case NPY_CLIP:
+ for (npy_intp i = 0; i < n; i++) {
+ for (npy_intp j = 0; j < m; j++) {
+ npy_intp tmp = indices[j];
+ if (tmp < 0) {
+ tmp = 0;
+ }
+ else if (tmp >= max_item) {
+ tmp = max_item - 1;
+ }
+ char *tmp_src = src + tmp * chunk;
+ if (needs_refcounting) {
+ for (npy_intp k = 0; k < nelem; k++) {
+ PyArray_Item_INCREF(tmp_src, dtype);
+ PyArray_Item_XDECREF(dest, dtype);
+ memmove(dest, tmp_src, itemsize);
+ dest += itemsize;
+ tmp_src += itemsize;
+ }
+ }
+ else {
+ memmove(dest, tmp_src, chunk);
+ dest += chunk;
+ }
+ }
+ src += chunk*max_item;
+ }
+ break;
+ }
+
+ NPY_END_THREADS;
+ return 0;
+}
+
+
+/*
+ * Helper function instantiating npy_fasttake_impl in different branches
+ * to allow the compiler to optimize each to the specific itemsize.
+ */
+static NPY_GCC_OPT_3 int
+npy_fasttake(
+ char *dest, char *src, const npy_intp *indices,
+ npy_intp n, npy_intp m, npy_intp max_item,
+ npy_intp nelem, npy_intp chunk,
+ NPY_CLIPMODE clipmode, npy_intp itemsize, int needs_refcounting,
+ PyArray_Descr *dtype, int axis)
+{
+ if (!needs_refcounting) {
+ if (chunk == 1) {
+ return npy_fasttake_impl(
+ dest, src, indices, n, m, max_item, nelem, chunk,
+ clipmode, itemsize, needs_refcounting, dtype, axis);
+ }
+ if (chunk == 2) {
+ return npy_fasttake_impl(
+ dest, src, indices, n, m, max_item, nelem, chunk,
+ clipmode, itemsize, needs_refcounting, dtype, axis);
+ }
+ if (chunk == 4) {
+ return npy_fasttake_impl(
+ dest, src, indices, n, m, max_item, nelem, chunk,
+ clipmode, itemsize, needs_refcounting, dtype, axis);
+ }
+ if (chunk == 8) {
+ return npy_fasttake_impl(
+ dest, src, indices, n, m, max_item, nelem, chunk,
+ clipmode, itemsize, needs_refcounting, dtype, axis);
+ }
+ if (chunk == 16) {
+ return npy_fasttake_impl(
+ dest, src, indices, n, m, max_item, nelem, chunk,
+ clipmode, itemsize, needs_refcounting, dtype, axis);
+ }
+ if (chunk == 32) {
+ return npy_fasttake_impl(
+ dest, src, indices, n, m, max_item, nelem, chunk,
+ clipmode, itemsize, needs_refcounting, dtype, axis);
+ }
+ }
+
+ return npy_fasttake_impl(
+ dest, src, indices, n, m, max_item, nelem, chunk,
+ clipmode, itemsize, needs_refcounting, dtype, axis);
+}
+
/*NUMPY_API
* Take
@@ -35,12 +192,10 @@ PyArray_TakeFrom(PyArrayObject *self0, PyObject *indices0, int axis,
PyArrayObject *out, NPY_CLIPMODE clipmode)
{
PyArray_Descr *dtype;
- PyArray_FastTakeFunc *func;
PyArrayObject *obj = NULL, *self, *indices;
- npy_intp nd, i, j, n, m, k, max_item, tmp, chunk, itemsize, nelem;
+ npy_intp nd, i, n, m, max_item, chunk, itemsize, nelem;
npy_intp shape[NPY_MAXDIMS];
- char *src, *dest, *tmp_src;
- int err;
+
npy_bool needs_refcounting;
indices = NULL;
@@ -122,9 +277,10 @@ PyArray_TakeFrom(PyArrayObject *self0, PyObject *indices0, int axis,
nelem = chunk;
itemsize = PyArray_ITEMSIZE(obj);
chunk = chunk * itemsize;
- src = PyArray_DATA(self);
- dest = PyArray_DATA(obj);
+ char *src = PyArray_DATA(self);
+ char *dest = PyArray_DATA(obj);
needs_refcounting = PyDataType_REFCHK(PyArray_DESCR(self));
+ npy_intp *indices_data = (npy_intp *)PyArray_DATA(indices);
if ((max_item == 0) && (PyArray_SIZE(obj) != 0)) {
/* Index error, since that is the usual error for raise mode */
@@ -133,107 +289,10 @@ PyArray_TakeFrom(PyArrayObject *self0, PyObject *indices0, int axis,
goto fail;
}
- func = PyArray_DESCR(self)->f->fasttake;
- if (func == NULL) {
- NPY_BEGIN_THREADS_DEF;
- NPY_BEGIN_THREADS_DESCR(PyArray_DESCR(self));
- switch(clipmode) {
- case NPY_RAISE:
- for (i = 0; i < n; i++) {
- for (j = 0; j < m; j++) {
- tmp = ((npy_intp *)(PyArray_DATA(indices)))[j];
- if (check_and_adjust_index(&tmp, max_item, axis,
- _save) < 0) {
- goto fail;
- }
- tmp_src = src + tmp * chunk;
- if (needs_refcounting) {
- for (k=0; k < nelem; k++) {
- PyArray_Item_INCREF(tmp_src, PyArray_DESCR(self));
- PyArray_Item_XDECREF(dest, PyArray_DESCR(self));
- memmove(dest, tmp_src, itemsize);
- dest += itemsize;
- tmp_src += itemsize;
- }
- }
- else {
- memmove(dest, tmp_src, chunk);
- dest += chunk;
- }
- }
- src += chunk*max_item;
- }
- break;
- case NPY_WRAP:
- for (i = 0; i < n; i++) {
- for (j = 0; j < m; j++) {
- tmp = ((npy_intp *)(PyArray_DATA(indices)))[j];
- if (tmp < 0) {
- while (tmp < 0) {
- tmp += max_item;
- }
- }
- else if (tmp >= max_item) {
- while (tmp >= max_item) {
- tmp -= max_item;
- }
- }
- tmp_src = src + tmp * chunk;
- if (needs_refcounting) {
- for (k=0; k < nelem; k++) {
- PyArray_Item_INCREF(tmp_src, PyArray_DESCR(self));
- PyArray_Item_XDECREF(dest, PyArray_DESCR(self));
- memmove(dest, tmp_src, itemsize);
- dest += itemsize;
- tmp_src += itemsize;
- }
- }
- else {
- memmove(dest, tmp_src, chunk);
- dest += chunk;
- }
- }
- src += chunk*max_item;
- }
- break;
- case NPY_CLIP:
- for (i = 0; i < n; i++) {
- for (j = 0; j < m; j++) {
- tmp = ((npy_intp *)(PyArray_DATA(indices)))[j];
- if (tmp < 0) {
- tmp = 0;
- }
- else if (tmp >= max_item) {
- tmp = max_item - 1;
- }
- tmp_src = src + tmp * chunk;
- if (needs_refcounting) {
- for (k=0; k < nelem; k++) {
- PyArray_Item_INCREF(tmp_src, PyArray_DESCR(self));
- PyArray_Item_XDECREF(dest, PyArray_DESCR(self));
- memmove(dest, tmp_src, itemsize);
- dest += itemsize;
- tmp_src += itemsize;
- }
- }
- else {
- memmove(dest, tmp_src, chunk);
- dest += chunk;
- }
- }
- src += chunk*max_item;
- }
- break;
- }
- NPY_END_THREADS;
- }
- else {
- /* no gil release, need it for error reporting */
- err = func(dest, src, (npy_intp *)(PyArray_DATA(indices)),
- max_item, n, m, nelem, clipmode);
- if (err) {
- goto fail;
- }
+ if (npy_fasttake(
+ dest, src, indices_data, n, m, max_item, nelem, chunk,
+ clipmode, itemsize, needs_refcounting, dtype, axis) < 0) {
+ goto fail;
}
Py_XDECREF(indices);
@@ -431,16 +490,78 @@ PyArray_PutTo(PyArrayObject *self, PyObject* values0, PyObject *indices0,
return NULL;
}
+
+static NPY_GCC_OPT_3 NPY_INLINE void
+npy_fastputmask_impl(
+ char *dest, char *src, const npy_bool *mask_data,
+ npy_intp ni, npy_intp nv, npy_intp chunk)
+{
+ if (nv == 1) {
+ for (npy_intp i = 0; i < ni; i++) {
+ if (mask_data[i]) {
+ memmove(dest, src, chunk);
+ }
+ dest += chunk;
+ }
+ }
+ else {
+ char *tmp_src = src;
+ for (npy_intp i = 0, j = 0; i < ni; i++, j++) {
+ if (NPY_UNLIKELY(j >= nv)) {
+ j = 0;
+ tmp_src = src;
+ }
+ if (mask_data[i]) {
+ memmove(dest, tmp_src, chunk);
+ }
+ dest += chunk;
+ tmp_src += chunk;
+ }
+ }
+}
+
+
+/*
+ * Helper function instantiating npy_fastputmask_impl in different branches
+ * to allow the compiler to optimize each to the specific itemsize.
+ */
+static NPY_GCC_OPT_3 void
+npy_fastputmask(
+ char *dest, char *src, npy_bool *mask_data,
+ npy_intp ni, npy_intp nv, npy_intp chunk)
+{
+ if (chunk == 1) {
+ return npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk);
+ }
+ if (chunk == 2) {
+ return npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk);
+ }
+ if (chunk == 4) {
+ return npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk);
+ }
+ if (chunk == 8) {
+ return npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk);
+ }
+ if (chunk == 16) {
+ return npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk);
+ }
+ if (chunk == 32) {
+ return npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk);
+ }
+
+ return npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk);
+}
+
+
/*NUMPY_API
* Put values into an array according to a mask.
*/
NPY_NO_EXPORT PyObject *
PyArray_PutMask(PyArrayObject *self, PyObject* values0, PyObject* mask0)
{
- PyArray_FastPutmaskFunc *func;
PyArrayObject *mask, *values;
PyArray_Descr *dtype;
- npy_intp i, j, chunk, ni, nv;
+ npy_intp chunk, ni, nv;
char *src, *dest;
npy_bool *mask_data;
int copied = 0;
@@ -505,7 +626,7 @@ PyArray_PutMask(PyArrayObject *self, PyObject* values0, PyObject* mask0)
dest = PyArray_DATA(self);
if (PyDataType_REFCHK(PyArray_DESCR(self))) {
- for (i = 0, j = 0; i < ni; i++, j++) {
+ for (npy_intp i = 0, j = 0; i < ni; i++, j++) {
if (j >= nv) {
j = 0;
}
@@ -522,20 +643,7 @@ PyArray_PutMask(PyArrayObject *self, PyObject* values0, PyObject* mask0)
else {
NPY_BEGIN_THREADS_DEF;
NPY_BEGIN_THREADS_DESCR(PyArray_DESCR(self));
- func = PyArray_DESCR(self)->f->fastputmask;
- if (func == NULL) {
- for (i = 0, j = 0; i < ni; i++, j++) {
- if (j >= nv) {
- j = 0;
- }
- if (mask_data[i]) {
- memmove(dest + i*chunk, src + j*chunk, chunk);
- }
- }
- }
- else {
- func(dest, mask_data, ni, src, nv);
- }
+ npy_fastputmask(dest, src, mask_data, ni, nv, chunk);
NPY_END_THREADS;
}
@@ -825,7 +933,7 @@ PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject *out,
*/
static int
_new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort,
- PyArray_PartitionFunc *part, npy_intp *kth, npy_intp nkth)
+ PyArray_PartitionFunc *part, npy_intp const *kth, npy_intp nkth)
{
npy_intp N = PyArray_DIM(op, axis);
npy_intp elsize = (npy_intp)PyArray_ITEMSIZE(op);
@@ -953,7 +1061,7 @@ fail:
static PyObject*
_new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort,
PyArray_ArgPartitionFunc *argpart,
- npy_intp *kth, npy_intp nkth)
+ npy_intp const *kth, npy_intp nkth)
{
npy_intp N = PyArray_DIM(op, axis);
npy_intp elsize = (npy_intp)PyArray_ITEMSIZE(op);
@@ -1059,12 +1167,10 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort,
if (argpart == NULL) {
ret = argsort(valptr, idxptr, N, op);
-#if defined(NPY_PY3K)
/* Object comparisons may raise an exception in Python 3 */
if (hasrefs && PyErr_Occurred()) {
ret = -1;
}
-#endif
if (ret < 0) {
goto fail;
}
@@ -1075,12 +1181,10 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort,
for (i = 0; i < nkth; ++i) {
ret = argpart(valptr, idxptr, N, kth[i], pivots, &npiv, op);
-#if defined(NPY_PY3K)
/* Object comparisons may raise an exception in Python 3 */
if (hasrefs && PyErr_Occurred()) {
ret = -1;
}
-#endif
if (ret < 0) {
goto fail;
}
@@ -1566,12 +1670,8 @@ PyArray_LexSort(PyObject *sort_keys, int axis)
_strided_byte_swap(valbuffer, (npy_intp) elsize, N, elsize);
}
rcode = argsort(valbuffer, (npy_intp *)indbuffer, N, mps[j]);
-#if defined(NPY_PY3K)
if (rcode < 0 || (PyDataType_REFCHK(PyArray_DESCR(mps[j]))
&& PyErr_Occurred())) {
-#else
- if (rcode < 0) {
-#endif
PyDataMem_FREE(valbuffer);
PyDataMem_FREE(indbuffer);
free(swaps);
@@ -1601,12 +1701,8 @@ PyArray_LexSort(PyObject *sort_keys, int axis)
}
rcode = argsort(its[j]->dataptr,
(npy_intp *)rit->dataptr, N, mps[j]);
-#if defined(NPY_PY3K)
if (rcode < 0 || (PyDataType_REFCHK(PyArray_DESCR(mps[j]))
&& PyErr_Occurred())) {
-#else
- if (rcode < 0) {
-#endif
goto fail;
}
PyArray_ITER_NEXT(its[j]);
@@ -2028,7 +2124,7 @@ count_nonzero_bytes_384(const npy_uint64 * w)
* Returns -1 on error.
*/
NPY_NO_EXPORT npy_intp
-count_boolean_trues(int ndim, char *data, npy_intp *ashape, npy_intp *astrides)
+count_boolean_trues(int ndim, char *data, npy_intp const *ashape, npy_intp const *astrides)
{
int idim;
npy_intp shape[NPY_MAXDIMS], strides[NPY_MAXDIMS];
@@ -2392,7 +2488,7 @@ PyArray_Nonzero(PyArrayObject *self)
Py_DECREF(ret);
return NULL;
}
-
+
needs_api = NpyIter_IterationNeedsAPI(iter);
NPY_BEGIN_THREADS_NDITER(iter);
@@ -2436,7 +2532,7 @@ finish:
Py_DECREF(ret);
return NULL;
}
-
+
/* if executed `nonzero()` check for miscount due to side-effect */
if (!is_bool && added_count != nonzero_count) {
PyErr_SetString(PyExc_RuntimeError,
diff --git a/numpy/core/src/multiarray/item_selection.h b/numpy/core/src/multiarray/item_selection.h
index 2276b4db7..c1c8b5567 100644
--- a/numpy/core/src/multiarray/item_selection.h
+++ b/numpy/core/src/multiarray/item_selection.h
@@ -8,7 +8,7 @@
* Returns -1 on error.
*/
NPY_NO_EXPORT npy_intp
-count_boolean_trues(int ndim, char *data, npy_intp *ashape, npy_intp *astrides);
+count_boolean_trues(int ndim, char *data, npy_intp const *ashape, npy_intp const *astrides);
/*
* Gets a single item from the array, based on a single multi-index
diff --git a/numpy/core/src/multiarray/iterators.c b/numpy/core/src/multiarray/iterators.c
index e66bb36aa..c71b7b770 100644
--- a/numpy/core/src/multiarray/iterators.c
+++ b/numpy/core/src/multiarray/iterators.c
@@ -172,7 +172,7 @@ NPY_NO_EXPORT PyObject *
PyArray_IterNew(PyObject *obj)
{
/*
- * Note that internall PyArray_RawIterBaseInit may be called directly on a
+ * Note that internally PyArray_RawIterBaseInit may be called directly on a
* statically allocated PyArrayIterObject.
*/
PyArrayIterObject *it;
@@ -1102,63 +1102,17 @@ static PyGetSetDef iter_getsets[] = {
};
NPY_NO_EXPORT PyTypeObject PyArrayIter_Type = {
-#if defined(NPY_PY3K)
PyVarObject_HEAD_INIT(NULL, 0)
-#else
- PyObject_HEAD_INIT(NULL)
- 0, /* ob_size */
-#endif
- "numpy.flatiter", /* tp_name */
- sizeof(PyArrayIterObject), /* tp_basicsize */
- 0, /* tp_itemsize */
- /* methods */
- (destructor)arrayiter_dealloc, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
-#if defined(NPY_PY3K)
- 0, /* tp_reserved */
-#else
- 0, /* tp_compare */
-#endif
- 0, /* tp_repr */
- 0, /* tp_as_number */
- 0, /* tp_as_sequence */
- &iter_as_mapping, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- 0, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
- Py_TPFLAGS_DEFAULT, /* tp_flags */
- 0, /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- (richcmpfunc)iter_richcompare, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- (iternextfunc)arrayiter_next, /* tp_iternext */
- iter_methods, /* tp_methods */
- iter_members, /* tp_members */
- iter_getsets, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- 0, /* tp_init */
- 0, /* tp_alloc */
- 0, /* tp_new */
- 0, /* tp_free */
- 0, /* tp_is_gc */
- 0, /* tp_bases */
- 0, /* tp_mro */
- 0, /* tp_cache */
- 0, /* tp_subclasses */
- 0, /* tp_weaklist */
- 0, /* tp_del */
- 0, /* tp_version_tag */
+ .tp_name = "numpy.flatiter",
+ .tp_basicsize = sizeof(PyArrayIterObject),
+ .tp_dealloc = (destructor)arrayiter_dealloc,
+ .tp_as_mapping = &iter_as_mapping,
+ .tp_flags = Py_TPFLAGS_DEFAULT,
+ .tp_richcompare = (richcmpfunc)iter_richcompare,
+ .tp_iternext = (iternextfunc)arrayiter_next,
+ .tp_methods = iter_methods,
+ .tp_members = iter_members,
+ .tp_getset = iter_getsets,
};
/** END of Array Iterator **/
@@ -1560,63 +1514,16 @@ static PyMethodDef arraymultiter_methods[] = {
};
NPY_NO_EXPORT PyTypeObject PyArrayMultiIter_Type = {
-#if defined(NPY_PY3K)
PyVarObject_HEAD_INIT(NULL, 0)
-#else
- PyObject_HEAD_INIT(NULL)
- 0, /* ob_size */
-#endif
- "numpy.broadcast", /* tp_name */
- sizeof(PyArrayMultiIterObject), /* tp_basicsize */
- 0, /* tp_itemsize */
- /* methods */
- (destructor)arraymultiter_dealloc, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
-#if defined(NPY_PY3K)
- 0, /* tp_reserved */
-#else
- 0, /* tp_compare */
-#endif
- 0, /* tp_repr */
- 0, /* tp_as_number */
- 0, /* tp_as_sequence */
- 0, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- 0, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
- Py_TPFLAGS_DEFAULT, /* tp_flags */
- 0, /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- (iternextfunc)arraymultiter_next, /* tp_iternext */
- arraymultiter_methods, /* tp_methods */
- arraymultiter_members, /* tp_members */
- arraymultiter_getsetlist, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- (initproc)0, /* tp_init */
- 0, /* tp_alloc */
- arraymultiter_new, /* tp_new */
- 0, /* tp_free */
- 0, /* tp_is_gc */
- 0, /* tp_bases */
- 0, /* tp_mro */
- 0, /* tp_cache */
- 0, /* tp_subclasses */
- 0, /* tp_weaklist */
- 0, /* tp_del */
- 0, /* tp_version_tag */
+ .tp_name = "numpy.broadcast",
+ .tp_basicsize = sizeof(PyArrayMultiIterObject),
+ .tp_dealloc = (destructor)arraymultiter_dealloc,
+ .tp_flags = Py_TPFLAGS_DEFAULT,
+ .tp_iternext = (iternextfunc)arraymultiter_next,
+ .tp_methods = arraymultiter_methods,
+ .tp_members = arraymultiter_members,
+ .tp_getset = arraymultiter_getsetlist,
+ .tp_new = arraymultiter_new,
};
/*========================= Neighborhood iterator ======================*/
@@ -1890,60 +1797,9 @@ static void neighiter_dealloc(PyArrayNeighborhoodIterObject* iter)
}
NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type = {
-#if defined(NPY_PY3K)
PyVarObject_HEAD_INIT(NULL, 0)
-#else
- PyObject_HEAD_INIT(NULL)
- 0, /* ob_size */
-#endif
- "numpy.neigh_internal_iter", /* tp_name*/
- sizeof(PyArrayNeighborhoodIterObject), /* tp_basicsize*/
- 0, /* tp_itemsize*/
- (destructor)neighiter_dealloc, /* tp_dealloc*/
- 0, /* tp_print*/
- 0, /* tp_getattr*/
- 0, /* tp_setattr*/
-#if defined(NPY_PY3K)
- 0, /* tp_reserved */
-#else
- 0, /* tp_compare */
-#endif
- 0, /* tp_repr*/
- 0, /* tp_as_number*/
- 0, /* tp_as_sequence*/
- 0, /* tp_as_mapping*/
- 0, /* tp_hash */
- 0, /* tp_call*/
- 0, /* tp_str*/
- 0, /* tp_getattro*/
- 0, /* tp_setattro*/
- 0, /* tp_as_buffer*/
- Py_TPFLAGS_DEFAULT, /* tp_flags*/
- 0, /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- (iternextfunc)0, /* tp_iternext */
- 0, /* tp_methods */
- 0, /* tp_members */
- 0, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- (initproc)0, /* tp_init */
- 0, /* tp_alloc */
- 0, /* tp_new */
- 0, /* tp_free */
- 0, /* tp_is_gc */
- 0, /* tp_bases */
- 0, /* tp_mro */
- 0, /* tp_cache */
- 0, /* tp_subclasses */
- 0, /* tp_weaklist */
- 0, /* tp_del */
- 0, /* tp_version_tag */
+ .tp_name = "numpy.neigh_internal_iter",
+ .tp_basicsize = sizeof(PyArrayNeighborhoodIterObject),
+ .tp_dealloc = (destructor)neighiter_dealloc,
+ .tp_flags = Py_TPFLAGS_DEFAULT,
};
diff --git a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
index 63b2a8842..d234c366c 100644
--- a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
+++ b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
@@ -974,9 +974,9 @@ PyArray_GetStridedNumericCastFn(int aligned, npy_intp src_stride,
NPY_NO_EXPORT npy_intp
PyArray_TransferNDimToStrided(npy_intp ndim,
char *dst, npy_intp dst_stride,
- char *src, npy_intp *src_strides, npy_intp src_strides_inc,
- npy_intp *coords, npy_intp coords_inc,
- npy_intp *shape, npy_intp shape_inc,
+ char *src, npy_intp const *src_strides, npy_intp src_strides_inc,
+ npy_intp const *coords, npy_intp coords_inc,
+ npy_intp const *shape, npy_intp shape_inc,
npy_intp count, npy_intp src_itemsize,
PyArray_StridedUnaryOp *stransfer,
NpyAuxData *data)
@@ -1092,10 +1092,10 @@ PyArray_TransferNDimToStrided(npy_intp ndim,
/* See documentation of arguments in lowlevel_strided_loops.h */
NPY_NO_EXPORT npy_intp
PyArray_TransferStridedToNDim(npy_intp ndim,
- char *dst, npy_intp *dst_strides, npy_intp dst_strides_inc,
+ char *dst, npy_intp const *dst_strides, npy_intp dst_strides_inc,
char *src, npy_intp src_stride,
- npy_intp *coords, npy_intp coords_inc,
- npy_intp *shape, npy_intp shape_inc,
+ npy_intp const *coords, npy_intp coords_inc,
+ npy_intp const *shape, npy_intp shape_inc,
npy_intp count, npy_intp src_itemsize,
PyArray_StridedUnaryOp *stransfer,
NpyAuxData *data)
@@ -1211,11 +1211,11 @@ PyArray_TransferStridedToNDim(npy_intp ndim,
/* See documentation of arguments in lowlevel_strided_loops.h */
NPY_NO_EXPORT npy_intp
PyArray_TransferMaskedStridedToNDim(npy_intp ndim,
- char *dst, npy_intp *dst_strides, npy_intp dst_strides_inc,
+ char *dst, npy_intp const *dst_strides, npy_intp dst_strides_inc,
char *src, npy_intp src_stride,
npy_uint8 *mask, npy_intp mask_stride,
- npy_intp *coords, npy_intp coords_inc,
- npy_intp *shape, npy_intp shape_inc,
+ npy_intp const *coords, npy_intp coords_inc,
+ npy_intp const *shape, npy_intp shape_inc,
npy_intp count, npy_intp src_itemsize,
PyArray_MaskedStridedUnaryOp *stransfer,
NpyAuxData *data)
diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c
index 247864775..7047304eb 100644
--- a/numpy/core/src/multiarray/mapping.c
+++ b/numpy/core/src/multiarray/mapping.c
@@ -227,11 +227,7 @@ unpack_indices(PyObject *index, PyObject **result, npy_intp result_n)
/* Obvious single-entry cases */
if (0 /* to aid macros below */
-#if !defined(NPY_PY3K)
- || PyInt_CheckExact(index)
-#else
|| PyLong_CheckExact(index)
-#endif
|| index == Py_None
|| PySlice_Check(index)
|| PyArray_Check(index)
@@ -481,11 +477,7 @@ prepare_index(PyArrayObject *self, PyObject *index,
*
* Check for integers first, purely for performance
*/
-#if !defined(NPY_PY3K)
- if (PyInt_CheckExact(obj) || !PyArray_Check(obj)) {
-#else
if (PyLong_CheckExact(obj) || !PyArray_Check(obj)) {
-#endif
npy_intp ind = PyArray_PyIntAsIntp(obj);
if (error_converting(ind)) {
@@ -735,8 +727,11 @@ prepare_index(PyArrayObject *self, PyObject *index,
}
}
else if (used_ndim > PyArray_NDIM(self)) {
- PyErr_SetString(PyExc_IndexError,
- "too many indices for array");
+ PyErr_Format(PyExc_IndexError,
+ "too many indices for array: "
+ "array is %d-dimensional, but %d were indexed",
+ PyArray_NDIM(self),
+ used_ndim);
goto failed_building_indices;
}
else if (index_ndim == 0) {
@@ -1198,9 +1193,9 @@ array_assign_boolean_subscript(PyArrayObject *self,
if (size != PyArray_DIMS(v)[0]) {
PyErr_Format(PyExc_ValueError,
"NumPy boolean array indexing assignment "
- "cannot assign %d input values to "
- "the %d output values where the mask is true",
- (int)PyArray_DIMS(v)[0], (int)size);
+ "cannot assign %" NPY_INTP_FMT " input values to "
+ "the %" NPY_INTP_FMT " output values where the mask is true",
+ PyArray_DIMS(v)[0], size);
return -1;
}
v_stride = PyArray_STRIDES(v)[0];
@@ -1412,8 +1407,11 @@ _get_field_view(PyArrayObject *arr, PyObject *ind, PyArrayObject **view)
npy_intp offset;
/* get the field offset and dtype */
- tup = PyDict_GetItem(PyArray_DESCR(arr)->fields, ind);
- if (tup == NULL){
+ tup = PyDict_GetItemWithError(PyArray_DESCR(arr)->fields, ind);
+ if (tup == NULL && PyErr_Occurred()) {
+ return 0;
+ }
+ else if (tup == NULL){
PyObject *errmsg = PyUString_FromString("no field of name ");
PyUString_Concat(&errmsg, ind);
PyErr_SetObject(PyExc_ValueError, errmsg);
@@ -3340,61 +3338,9 @@ arraymapiter_dealloc(PyArrayMapIterObject *mit)
* to a[indexobj].flat but the latter gets to use slice syntax.
*/
NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type = {
-#if defined(NPY_PY3K)
PyVarObject_HEAD_INIT(NULL, 0)
-#else
- PyObject_HEAD_INIT(NULL)
- 0, /* ob_size */
-#endif
- "numpy.mapiter", /* tp_name */
- sizeof(PyArrayMapIterObject), /* tp_basicsize */
- 0, /* tp_itemsize */
- /* methods */
- (destructor)arraymapiter_dealloc, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
-#if defined(NPY_PY3K)
- 0, /* tp_reserved */
-#else
- 0, /* tp_compare */
-#endif
- 0, /* tp_repr */
- 0, /* tp_as_number */
- 0, /* tp_as_sequence */
- 0, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- 0, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
- Py_TPFLAGS_DEFAULT, /* tp_flags */
- 0, /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
- 0, /* tp_methods */
- 0, /* tp_members */
- 0, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- 0, /* tp_init */
- 0, /* tp_alloc */
- 0, /* tp_new */
- 0, /* tp_free */
- 0, /* tp_is_gc */
- 0, /* tp_bases */
- 0, /* tp_mro */
- 0, /* tp_cache */
- 0, /* tp_subclasses */
- 0, /* tp_weaklist */
- 0, /* tp_del */
- 0, /* tp_version_tag */
+ .tp_name = "numpy.mapiter",
+ .tp_basicsize = sizeof(PyArrayMapIterObject),
+ .tp_dealloc = (destructor)arraymapiter_dealloc,
+ .tp_flags = Py_TPFLAGS_DEFAULT,
};
diff --git a/numpy/core/src/multiarray/methods.c b/numpy/core/src/multiarray/methods.c
index e5845f2f6..7bfbeca15 100644
--- a/numpy/core/src/multiarray/methods.c
+++ b/numpy/core/src/multiarray/methods.c
@@ -64,7 +64,11 @@ get_forwarding_ndarray_method(const char *name)
if (module_methods == NULL) {
return NULL;
}
- callable = PyDict_GetItemString(PyModule_GetDict(module_methods), name);
+ callable = _PyDict_GetItemStringWithError(PyModule_GetDict(module_methods), name);
+ if (callable == NULL && PyErr_Occurred()) {
+ Py_DECREF(module_methods);
+ return NULL;
+ }
if (callable == NULL) {
Py_DECREF(module_methods);
PyErr_Format(PyExc_RuntimeError,
@@ -562,6 +566,23 @@ array_tobytes(PyArrayObject *self, PyObject *args, PyObject *kwds)
return PyArray_ToString(self, order);
}
+static PyObject *
+array_tostring(PyArrayObject *self, PyObject *args, PyObject *kwds)
+{
+ NPY_ORDER order = NPY_CORDER;
+ static char *kwlist[] = {"order", NULL};
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&:tostring", kwlist,
+ PyArray_OrderConverter, &order)) {
+ return NULL;
+ }
+ /* 2020-03-30, NumPy 1.19 */
+ if (DEPRECATE("tostring() is deprecated. Use tobytes() instead.") < 0) {
+ return NULL;
+ }
+ return PyArray_ToString(self, order);
+}
+
/* This should grow an order= keyword to be consistent
*/
@@ -1469,7 +1490,7 @@ array_argpartition(PyArrayObject *self, PyObject *args, PyObject *kwds)
static PyObject *
array_searchsorted(PyArrayObject *self, PyObject *args, PyObject *kwds)
{
- static char *kwlist[] = {"keys", "side", "sorter", NULL};
+ static char *kwlist[] = {"v", "side", "sorter", NULL};
PyObject *keys;
PyObject *sorter;
NPY_SEARCHSIDE side = NPY_SEARCHLEFT;
@@ -1499,7 +1520,7 @@ _deepcopy_call(char *iptr, char *optr, PyArray_Descr *dtype,
int offset;
Py_ssize_t pos = 0;
while (PyDict_Next(dtype->fields, &pos, &key, &value)) {
- if NPY_TITLE_KEY(key, value) {
+ if (NPY_TITLE_KEY(key, value)) {
continue;
}
if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset,
@@ -1954,7 +1975,6 @@ array_setstate(PyArrayObject *self, PyObject *args)
else {
Py_INCREF(rawdata);
-#if defined(NPY_PY3K)
/* Backward compatibility with Python 2 NumPy pickles */
if (PyUnicode_Check(rawdata)) {
PyObject *tmp;
@@ -1969,7 +1989,6 @@ array_setstate(PyArrayObject *self, PyObject *args)
return NULL;
}
}
-#endif
if (!PyBytes_Check(rawdata)) {
PyErr_SetString(PyExc_TypeError,
@@ -2030,14 +2049,9 @@ array_setstate(PyArrayObject *self, PyObject *args)
if (!PyDataType_FLAGCHK(typecode, NPY_LIST_PICKLE)) {
int swap = PyArray_ISBYTESWAPPED(self);
fa->data = datastr;
-#ifndef NPY_PY3K
- /* Check that the string is not interned */
- if (!IsAligned(self) || swap || PyString_CHECK_INTERNED(rawdata)) {
-#else
/* Bytes should always be considered immutable, but we just grab the
* pointer if they are large, to save memory. */
if (!IsAligned(self) || swap || (len <= 1000)) {
-#endif
npy_intp num = PyArray_NBYTES(self);
if (num == 0) {
Py_DECREF(rawdata);
@@ -2641,51 +2655,6 @@ array_complex(PyArrayObject *self, PyObject *NPY_UNUSED(args))
return c;
}
-#ifndef NPY_PY3K
-
-static PyObject *
-array_getslice(PyArrayObject *self, PyObject *args)
-{
- PyObject *start, *stop, *slice, *result;
- if (!PyArg_ParseTuple(args, "OO:__getslice__", &start, &stop)) {
- return NULL;
- }
-
- slice = PySlice_New(start, stop, NULL);
- if (slice == NULL) {
- return NULL;
- }
-
- /* Deliberately delegate to subclasses */
- result = PyObject_GetItem((PyObject *)self, slice);
- Py_DECREF(slice);
- return result;
-}
-
-static PyObject *
-array_setslice(PyArrayObject *self, PyObject *args)
-{
- PyObject *start, *stop, *value, *slice;
- if (!PyArg_ParseTuple(args, "OOO:__setslice__", &start, &stop, &value)) {
- return NULL;
- }
-
- slice = PySlice_New(start, stop, NULL);
- if (slice == NULL) {
- return NULL;
- }
-
- /* Deliberately delegate to subclasses */
- if (PyObject_SetItem((PyObject *)self, slice, value) < 0) {
- Py_DECREF(slice);
- return NULL;
- }
- Py_DECREF(slice);
- Py_RETURN_NONE;
-}
-
-#endif
-
NPY_NO_EXPORT PyMethodDef array_methods[] = {
/* for subtypes */
@@ -2705,12 +2674,6 @@ NPY_NO_EXPORT PyMethodDef array_methods[] = {
(PyCFunction)array_function,
METH_VARARGS | METH_KEYWORDS, NULL},
-#ifndef NPY_PY3K
- {"__unicode__",
- (PyCFunction)array_unicode,
- METH_NOARGS, NULL},
-#endif
-
/* for the sys module */
{"__sizeof__",
(PyCFunction) array_sizeof,
@@ -2749,23 +2712,6 @@ NPY_NO_EXPORT PyMethodDef array_methods[] = {
(PyCFunction) array_format,
METH_VARARGS, NULL},
-#ifndef NPY_PY3K
- /*
- * While we could put these in `tp_sequence`, its' easier to define them
- * in terms of PyObject* arguments.
- *
- * We must provide these for compatibility with code that calls them
- * directly. They are already deprecated at a language level in python 2.7,
- * but are removed outright in python 3.
- */
- {"__getslice__",
- (PyCFunction) array_getslice,
- METH_VARARGS, NULL},
- {"__setslice__",
- (PyCFunction) array_setslice,
- METH_VARARGS, NULL},
-#endif
-
/* Original and Extended methods added 2005 */
{"all",
(PyCFunction)array_all,
@@ -2915,7 +2861,7 @@ NPY_NO_EXPORT PyMethodDef array_methods[] = {
(PyCFunction)array_tolist,
METH_VARARGS, NULL},
{"tostring",
- (PyCFunction)array_tobytes,
+ (PyCFunction)array_tostring,
METH_VARARGS | METH_KEYWORDS, NULL},
{"trace",
(PyCFunction)array_trace,
diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c
index 441567049..9e8022abd 100644
--- a/numpy/core/src/multiarray/multiarraymodule.c
+++ b/numpy/core/src/multiarray/multiarraymodule.c
@@ -118,6 +118,9 @@ PyArray_GetPriority(PyObject *obj, double default_)
ret = PyArray_LookupSpecial_OnInstance(obj, "__array_priority__");
if (ret == NULL) {
+ if (PyErr_Occurred()) {
+ PyErr_Clear(); /* TODO[gh-14801]: propagate crashes during attribute access? */
+ }
return default_;
}
@@ -158,7 +161,7 @@ PyArray_MultiplyList(npy_intp const *l1, int n)
* Multiply a List of Non-negative numbers with over-flow detection.
*/
NPY_NO_EXPORT npy_intp
-PyArray_OverflowMultiplyList(npy_intp *l1, int n)
+PyArray_OverflowMultiplyList(npy_intp const *l1, int n)
{
npy_intp prod = 1;
int i;
@@ -1112,6 +1115,14 @@ _pyarray_correlate(PyArrayObject *ap1, PyArrayObject *ap2, int typenum,
n1 = PyArray_DIMS(ap1)[0];
n2 = PyArray_DIMS(ap2)[0];
+ if (n1 == 0) {
+ PyErr_SetString(PyExc_ValueError, "first array argument cannot be empty");
+ return NULL;
+ }
+ if (n2 == 0) {
+ PyErr_SetString(PyExc_ValueError, "second array argument cannot be empty");
+ return NULL;
+ }
if (n1 < n2) {
ret = ap1;
ap1 = ap2;
@@ -1548,7 +1559,6 @@ _prepend_ones(PyArrayObject *arr, int nd, int ndmin, NPY_ORDER order)
return ret;
}
-
#define STRIDING_OK(op, order) \
((order) == NPY_ANYORDER || \
(order) == NPY_KEEPORDER || \
@@ -1562,8 +1572,7 @@ _array_fromobject(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kws)
PyArrayObject *oparr = NULL, *ret = NULL;
npy_bool subok = NPY_FALSE;
npy_bool copy = NPY_TRUE;
- int nd;
- npy_intp ndmin = 0;
+ int ndmin = 0, nd;
PyArray_Descr *type = NULL;
PyArray_Descr *oldtype = NULL;
NPY_ORDER order = NPY_KEEPORDER;
@@ -1573,8 +1582,9 @@ _array_fromobject(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kws)
"ndmin", NULL};
if (PyTuple_GET_SIZE(args) > 2) {
- PyErr_SetString(PyExc_ValueError,
- "only 2 non-keyword arguments accepted");
+ PyErr_Format(PyExc_TypeError,
+ "array() takes from 1 to 2 positional arguments but "
+ "%zd were given", PyTuple_GET_SIZE(args));
return NULL;
}
@@ -1591,7 +1601,10 @@ _array_fromobject(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kws)
dtype_obj = PyTuple_GET_ITEM(args, 1);
}
else if (kws) {
- dtype_obj = PyDict_GetItem(kws, npy_ma_str_dtype);
+ dtype_obj = PyDict_GetItemWithError(kws, npy_ma_str_dtype);
+ if (dtype_obj == NULL && PyErr_Occurred()) {
+ return NULL;
+ }
if (dtype_obj == NULL) {
dtype_obj = Py_None;
}
@@ -1608,7 +1621,10 @@ _array_fromobject(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kws)
else {
/* fast path for copy=False rest default (np.asarray) */
PyObject * copy_obj, * order_obj, *ndmin_obj;
- copy_obj = PyDict_GetItem(kws, npy_ma_str_copy);
+ copy_obj = PyDict_GetItemWithError(kws, npy_ma_str_copy);
+ if (copy_obj == NULL && PyErr_Occurred()) {
+ return NULL;
+ }
if (copy_obj != Py_False) {
goto full_path;
}
@@ -1617,21 +1633,28 @@ _array_fromobject(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kws)
/* order does not matter for contiguous 1d arrays */
if (PyArray_NDIM((PyArrayObject*)op) > 1 ||
!PyArray_IS_C_CONTIGUOUS((PyArrayObject*)op)) {
- order_obj = PyDict_GetItem(kws, npy_ma_str_order);
- if (order_obj != Py_None && order_obj != NULL) {
+ order_obj = PyDict_GetItemWithError(kws, npy_ma_str_order);
+ if (order_obj == NULL && PyErr_Occurred()) {
+ return NULL;
+ }
+ else if (order_obj != Py_None && order_obj != NULL) {
goto full_path;
}
}
- ndmin_obj = PyDict_GetItem(kws, npy_ma_str_ndmin);
- if (ndmin_obj) {
- ndmin = PyLong_AsLong(ndmin_obj);
- if (error_converting(ndmin)) {
+ ndmin_obj = PyDict_GetItemWithError(kws, npy_ma_str_ndmin);
+ if (ndmin_obj == NULL && PyErr_Occurred()) {
+ return NULL;
+ }
+ else if (ndmin_obj) {
+ long t = PyLong_AsLong(ndmin_obj);
+ if (error_converting(t)) {
goto clean_type;
}
- else if (ndmin > NPY_MAXDIMS) {
+ else if (t > NPY_MAXDIMS) {
goto full_path;
}
+ ndmin = t;
}
/* copy=False with default dtype, order (any is OK) and ndim */
@@ -1834,14 +1857,15 @@ array_empty_like(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds)
NPY_ORDER order = NPY_KEEPORDER;
PyArrayObject *ret = NULL;
int subok = 1;
- PyArray_Dims shape = {NULL, 0};
+ /* -1 is a special value meaning "not specified" */
+ PyArray_Dims shape = {NULL, -1};
if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&|O&O&iO&:empty_like", kwlist,
&PyArray_Converter, &prototype,
&PyArray_DescrConverter2, &dtype,
&PyArray_OrderConverter, &order,
&subok,
- &PyArray_IntpConverter, &shape)) {
+ &PyArray_OptionalIntpConverter, &shape)) {
goto fail;
}
/* steals the reference to dtype if it's not NULL */
@@ -1880,8 +1904,17 @@ array_scalar(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds)
&PyArrayDescr_Type, &typecode, &obj)) {
return NULL;
}
+ if (PyDataType_FLAGCHK(typecode, NPY_LIST_PICKLE)) {
+ if (!PySequence_Check(obj)) {
+ PyErr_SetString(PyExc_TypeError,
+ "found non-sequence while unpickling scalar with "
+ "NPY_LIST_PICKLE set");
+ return NULL;
+ }
+ dptr = &obj;
+ }
- if (PyDataType_FLAGCHK(typecode, NPY_ITEM_IS_POINTER)) {
+ else if (PyDataType_FLAGCHK(typecode, NPY_ITEM_IS_POINTER)) {
if (obj == NULL) {
obj = Py_None;
}
@@ -1900,7 +1933,6 @@ array_scalar(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds)
alloc = 1;
}
else {
-#if defined(NPY_PY3K)
/* Backward compatibility with Python 2 NumPy pickles */
if (PyUnicode_Check(obj)) {
tmpobj = PyUnicode_AsLatin1String(obj);
@@ -1914,8 +1946,6 @@ array_scalar(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds)
return NULL;
}
}
-#endif
-
if (!PyString_Check(obj)) {
PyErr_SetString(PyExc_TypeError,
"initializing object must be a string");
@@ -2000,11 +2030,7 @@ array_count_nonzero(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwds)
if (count == -1) {
return NULL;
}
-#if defined(NPY_PY3K)
return PyLong_FromSsize_t(count);
-#else
- return PyInt_FromSsize_t(count);
-#endif
}
static PyObject *
@@ -2063,7 +2089,7 @@ array_fromfile(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds)
if (file == NULL) {
return NULL;
}
-
+
if (offset != 0 && strcmp(sep, "") != 0) {
PyErr_SetString(PyExc_TypeError, "'offset' argument only permitted for binary files");
Py_XDECREF(type);
@@ -2619,13 +2645,11 @@ array_einsum(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds)
while (PyDict_Next(kwds, &pos, &key, &value)) {
char *str = NULL;
-#if defined(NPY_PY3K)
Py_XDECREF(str_key_obj);
str_key_obj = PyUnicode_AsASCIIString(key);
if (str_key_obj != NULL) {
key = str_key_obj;
}
-#endif
str = PyBytes_AsString(key);
@@ -3265,142 +3289,16 @@ array_datetime_data(PyObject *NPY_UNUSED(dummy), PyObject *args)
}
meta = get_datetime_metadata_from_dtype(dtype);
- Py_DECREF(dtype);
if (meta == NULL) {
+ Py_DECREF(dtype);
return NULL;
}
- return convert_datetime_metadata_to_tuple(meta);
-}
-
-#if !defined(NPY_PY3K)
-static PyObject *
-new_buffer(PyObject *NPY_UNUSED(dummy), PyObject *args)
-{
- int size;
-
- if (!PyArg_ParseTuple(args, "i:buffer", &size)) {
- return NULL;
- }
- return PyBuffer_New(size);
-}
-
-static PyObject *
-buffer_buffer(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds)
-{
- PyObject *obj;
- Py_ssize_t offset = 0, n;
- Py_ssize_t size = Py_END_OF_BUFFER;
- void *unused;
- static char *kwlist[] = {"object", "offset", "size", NULL};
-
- if (!PyArg_ParseTupleAndKeywords(args, kwds,
- "O|" NPY_SSIZE_T_PYFMT NPY_SSIZE_T_PYFMT ":get_buffer", kwlist,
- &obj, &offset, &size)) {
- return NULL;
- }
- if (PyObject_AsWriteBuffer(obj, &unused, &n) < 0) {
- PyErr_Clear();
- return PyBuffer_FromObject(obj, offset, size);
- }
- else {
- return PyBuffer_FromReadWriteObject(obj, offset, size);
- }
-}
-#endif
-
-#ifndef _MSC_VER
-#include <setjmp.h>
-#include <signal.h>
-jmp_buf _NPY_SIGSEGV_BUF;
-static void
-_SigSegv_Handler(int signum)
-{
- longjmp(_NPY_SIGSEGV_BUF, signum);
-}
-#endif
-
-#define _test_code() { \
- test = *((char*)memptr); \
- if (!ro) { \
- *((char *)memptr) = '\0'; \
- *((char *)memptr) = test; \
- } \
- test = *((char*)memptr+size-1); \
- if (!ro) { \
- *((char *)memptr+size-1) = '\0'; \
- *((char *)memptr+size-1) = test; \
- } \
- }
-
-static PyObject *
-as_buffer(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds)
-{
- PyObject *mem;
- Py_ssize_t size;
- npy_bool ro = NPY_FALSE, check = NPY_TRUE;
- void *memptr;
- static char *kwlist[] = {"mem", "size", "readonly", "check", NULL};
-
- if (!PyArg_ParseTupleAndKeywords(args, kwds,
- "O" NPY_SSIZE_T_PYFMT "|O&O&:int_asbuffer", kwlist,
- &mem, &size, PyArray_BoolConverter, &ro,
- PyArray_BoolConverter, &check)) {
- return NULL;
- }
- memptr = PyLong_AsVoidPtr(mem);
- if (memptr == NULL) {
- return NULL;
- }
- if (check) {
- /*
- * Try to dereference the start and end of the memory region
- * Catch segfault and report error if it occurs
- */
- char test;
- int err = 0;
-
-#ifdef _MSC_VER
- __try {
- _test_code();
- }
- __except(1) {
- err = 1;
- }
-#else
- PyOS_sighandler_t _npy_sig_save;
- _npy_sig_save = PyOS_setsig(SIGSEGV, _SigSegv_Handler);
- if (setjmp(_NPY_SIGSEGV_BUF) == 0) {
- _test_code();
- }
- else {
- err = 1;
- }
- PyOS_setsig(SIGSEGV, _npy_sig_save);
-#endif
- if (err) {
- PyErr_SetString(PyExc_ValueError,
- "cannot use memory location as a buffer.");
- return NULL;
- }
- }
-
-
-#if defined(NPY_PY3K)
- PyErr_SetString(PyExc_RuntimeError,
- "XXX -- not implemented!");
- return NULL;
-#else
- if (ro) {
- return PyBuffer_FromMemory(memptr, size);
- }
- return PyBuffer_FromReadWriteMemory(memptr, size);
-#endif
+ PyObject *res = convert_datetime_metadata_to_tuple(meta);
+ Py_DECREF(dtype);
+ return res;
}
-#undef _test_code
-
-
/*
* Prints floating-point scalars using the Dragon4 algorithm, scientific mode.
* See docstring of `np.format_float_scientific` for description of arguments.
@@ -3981,11 +3879,6 @@ array_shares_memory_impl(PyObject *args, PyObject *kwds, Py_ssize_t default_max_
goto fail;
}
}
-#if !defined(NPY_PY3K)
- else if (PyInt_Check(max_work_obj)) {
- max_work = PyInt_AsSsize_t(max_work_obj);
- }
-#endif
else {
PyErr_SetString(PyExc_ValueError, "max_work must be an integer");
goto fail;
@@ -4209,17 +4102,6 @@ static struct PyMethodDef array_module_methods[] = {
{"is_busday",
(PyCFunction)array_is_busday,
METH_VARARGS | METH_KEYWORDS, NULL},
-#if !defined(NPY_PY3K)
- {"newbuffer",
- (PyCFunction)new_buffer,
- METH_VARARGS, NULL},
- {"getbuffer",
- (PyCFunction)buffer_buffer,
- METH_VARARGS | METH_KEYWORDS, NULL},
-#endif
- {"int_asbuffer",
- (PyCFunction)as_buffer,
- METH_VARARGS | METH_KEYWORDS, NULL},
{"format_longfloat",
(PyCFunction)format_longfloat,
METH_VARARGS | METH_KEYWORDS, NULL},
@@ -4299,11 +4181,6 @@ setup_scalartypes(PyObject *NPY_UNUSED(dict))
if (PyType_Ready(&PyBool_Type) < 0) {
return -1;
}
-#if !defined(NPY_PY3K)
- if (PyType_Ready(&PyInt_Type) < 0) {
- return -1;
- }
-#endif
if (PyType_Ready(&PyFloat_Type) < 0) {
return -1;
}
@@ -4354,27 +4231,6 @@ setup_scalartypes(PyObject *NPY_UNUSED(dict))
return -1; \
}
-/*
- * In Py3K, int is no longer a fixed-width integer type, so don't
- * inherit numpy.int_ from it.
- */
-#if defined(NPY_PY3K)
-#define INHERIT_INT(child, parent2) \
- SINGLE_INHERIT(child, parent2);
-#else
-#define INHERIT_INT(child, parent2) \
- Py##child##ArrType_Type.tp_flags |= Py_TPFLAGS_INT_SUBCLASS; \
- DUAL_INHERIT(child, Int, parent2);
-#endif
-
-#if defined(NPY_PY3K)
-#define DUAL_INHERIT_COMPARE(child, parent1, parent2)
-#else
-#define DUAL_INHERIT_COMPARE(child, parent1, parent2) \
- Py##child##ArrType_Type.tp_compare = \
- Py##parent1##_Type.tp_compare;
-#endif
-
#define DUAL_INHERIT2(child, parent1, parent2) \
Py##child##ArrType_Type.tp_base = &Py##parent1##_Type; \
Py##child##ArrType_Type.tp_bases = \
@@ -4382,7 +4238,6 @@ setup_scalartypes(PyObject *NPY_UNUSED(dict))
&Py##parent2##ArrType_Type); \
Py##child##ArrType_Type.tp_richcompare = \
Py##parent1##_Type.tp_richcompare; \
- DUAL_INHERIT_COMPARE(child, parent1, parent2) \
Py##child##ArrType_Type.tp_hash = Py##parent1##_Type.tp_hash; \
if (PyType_Ready(&Py##child##ArrType_Type) < 0) { \
PyErr_Print(); \
@@ -4395,20 +4250,9 @@ setup_scalartypes(PyObject *NPY_UNUSED(dict))
SINGLE_INHERIT(Bool, Generic);
SINGLE_INHERIT(Byte, SignedInteger);
SINGLE_INHERIT(Short, SignedInteger);
-
-#if NPY_SIZEOF_INT == NPY_SIZEOF_LONG
- INHERIT_INT(Int, SignedInteger);
-#else
SINGLE_INHERIT(Int, SignedInteger);
-#endif
-
- INHERIT_INT(Long, SignedInteger);
-
-#if NPY_SIZEOF_LONGLONG == NPY_SIZEOF_LONG
- INHERIT_INT(LongLong, SignedInteger);
-#else
+ SINGLE_INHERIT(Long, SignedInteger);
SINGLE_INHERIT(LongLong, SignedInteger);
-#endif
/* Datetime doesn't fit in any category */
SINGLE_INHERIT(Datetime, Generic);
@@ -4447,9 +4291,7 @@ setup_scalartypes(PyObject *NPY_UNUSED(dict))
#undef SINGLE_INHERIT
#undef DUAL_INHERIT
-#undef INHERIT_INT
#undef DUAL_INHERIT2
-#undef DUAL_INHERIT_COMPARE
/*
* Clean up string and unicode array types so they act more like
@@ -4531,7 +4373,6 @@ intern_strings(void)
npy_ma_str_ndmin && npy_ma_str_axis1 && npy_ma_str_axis2;
}
-#if defined(NPY_PY3K)
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
"_multiarray_umath",
@@ -4543,25 +4384,19 @@ static struct PyModuleDef moduledef = {
NULL,
NULL
};
-#endif
/* Initialization function for the module */
-#if defined(NPY_PY3K)
-#define RETVAL(x) x
PyMODINIT_FUNC PyInit__multiarray_umath(void) {
-#else
-#define RETVAL(x)
-PyMODINIT_FUNC init_multiarray_umath(void) {
-#endif
PyObject *m, *d, *s;
PyObject *c_api;
+ /* Initialize CPU features */
+ if (npy_cpu_init() < 0) {
+ goto err;
+ }
+
/* Create the module and add the functions */
-#if defined(NPY_PY3K)
m = PyModule_Create(&moduledef);
-#else
- m = Py_InitModule("_multiarray_umath", array_module_methods);
-#endif
if (!m) {
goto err;
}
@@ -4685,6 +4520,16 @@ PyMODINIT_FUNC init_multiarray_umath(void) {
PyDict_SetItemString(d, "__version__", s);
Py_DECREF(s);
+ s = npy_cpu_features_dict();
+ if (s == NULL) {
+ goto err;
+ }
+ if (PyDict_SetItemString(d, "__cpu_features__", s) < 0) {
+ Py_DECREF(s);
+ goto err;
+ }
+ Py_DECREF(s);
+
s = NpyCapsule_FromVoidPtr((void *)_datetime_strings, NULL);
if (s == NULL) {
goto err;
@@ -4746,12 +4591,12 @@ PyMODINIT_FUNC init_multiarray_umath(void) {
if (initumath(m) != 0) {
goto err;
}
- return RETVAL(m);
+ return m;
err:
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"cannot load multiarray module.");
}
- return RETVAL(NULL);
+ return NULL;
}
diff --git a/numpy/core/src/multiarray/nditer_api.c b/numpy/core/src/multiarray/nditer_api.c
index db0bfcece..e7fe0fa50 100644
--- a/numpy/core/src/multiarray/nditer_api.c
+++ b/numpy/core/src/multiarray/nditer_api.c
@@ -371,8 +371,8 @@ NpyIter_ResetToIterIndexRange(NpyIter *iter,
}
if (errmsg == NULL) {
PyErr_Format(PyExc_ValueError,
- "Out-of-bounds range [%d, %d) passed to "
- "ResetToIterIndexRange", (int)istart, (int)iend);
+ "Out-of-bounds range [%" NPY_INTP_FMT ", %" NPY_INTP_FMT ") passed to "
+ "ResetToIterIndexRange", istart, iend);
}
else {
*errmsg = "Out-of-bounds range passed to ResetToIterIndexRange";
@@ -382,8 +382,8 @@ NpyIter_ResetToIterIndexRange(NpyIter *iter,
else if (iend < istart) {
if (errmsg == NULL) {
PyErr_Format(PyExc_ValueError,
- "Invalid range [%d, %d) passed to ResetToIterIndexRange",
- (int)istart, (int)iend);
+ "Invalid range [%" NPY_INTP_FMT ", %" NPY_INTP_FMT ") passed to ResetToIterIndexRange",
+ istart, iend);
}
else {
*errmsg = "Invalid range passed to ResetToIterIndexRange";
@@ -1429,8 +1429,8 @@ NpyIter_DebugPrint(NpyIter *iter)
printf("REUSE_REDUCE_LOOPS ");
printf("\n");
- printf("| NDim: %d\n", (int)ndim);
- printf("| NOp: %d\n", (int)nop);
+ printf("| NDim: %d\n", ndim);
+ printf("| NOp: %d\n", nop);
if (NIT_MASKOP(iter) >= 0) {
printf("| MaskOp: %d\n", (int)NIT_MASKOP(iter));
}
diff --git a/numpy/core/src/multiarray/nditer_constr.c b/numpy/core/src/multiarray/nditer_constr.c
index d40836dc2..e40a2d594 100644
--- a/numpy/core/src/multiarray/nditer_constr.c
+++ b/numpy/core/src/multiarray/nditer_constr.c
@@ -56,7 +56,7 @@ static int
npyiter_fill_axisdata(NpyIter *iter, npy_uint32 flags, npyiter_opitflags *op_itflags,
char **op_dataptr,
const npy_uint32 *op_flags, int **op_axes,
- npy_intp *itershape);
+ npy_intp const *itershape);
static void
npyiter_replace_axisdata(NpyIter *iter, int iop,
PyArrayObject *op,
@@ -80,7 +80,7 @@ npyiter_get_common_dtype(int nop, PyArrayObject **op,
static PyArrayObject *
npyiter_new_temp_array(NpyIter *iter, PyTypeObject *subtype,
npy_uint32 flags, npyiter_opitflags *op_itflags,
- int op_ndim, npy_intp *shape,
+ int op_ndim, npy_intp const *shape,
PyArray_Descr *op_dtype, const int *op_axes);
static int
npyiter_allocate_arrays(NpyIter *iter,
@@ -154,7 +154,7 @@ NpyIter_AdvancedNew(int nop, PyArrayObject **op_in, npy_uint32 flags,
if (nop > NPY_MAXARGS) {
PyErr_Format(PyExc_ValueError,
"Cannot construct an iterator with more than %d operands "
- "(%d were requested)", (int)NPY_MAXARGS, (int)nop);
+ "(%d were requested)", NPY_MAXARGS, nop);
return NULL;
}
@@ -810,7 +810,7 @@ npyiter_check_op_axes(int nop, int oa_ndim, int **op_axes,
PyErr_Format(PyExc_ValueError,
"Cannot construct an iterator with more than %d dimensions "
"(%d were requested for op_axes)",
- (int)NPY_MAXDIMS, oa_ndim);
+ NPY_MAXDIMS, oa_ndim);
return 0;
}
if (op_axes == NULL) {
@@ -826,14 +826,14 @@ npyiter_check_op_axes(int nop, int oa_ndim, int **op_axes,
if (axes != NULL) {
memset(axes_dupcheck, 0, NPY_MAXDIMS);
for (idim = 0; idim < oa_ndim; ++idim) {
- npy_intp i = axes[idim];
+ int i = axes[idim];
if (i >= 0) {
if (i >= NPY_MAXDIMS) {
PyErr_Format(PyExc_ValueError,
"The 'op_axes' provided to the iterator "
"constructor for operand %d "
"contained invalid "
- "values %d", (int)iop, (int)i);
+ "values %d", iop, i);
return 0;
}
else if (axes_dupcheck[i] == 1) {
@@ -841,7 +841,7 @@ npyiter_check_op_axes(int nop, int oa_ndim, int **op_axes,
"The 'op_axes' provided to the iterator "
"constructor for operand %d "
"contained duplicate "
- "value %d", (int)iop, (int)i);
+ "value %d", iop, i);
return 0;
}
else {
@@ -1311,7 +1311,7 @@ npyiter_check_casting(int nop, PyArrayObject **op,
PyObject *errmsg;
errmsg = PyUString_FromFormat(
"Iterator operand %d dtype could not be cast from ",
- (int)iop);
+ iop);
PyUString_ConcatAndDel(&errmsg,
PyObject_Repr((PyObject *)PyArray_DESCR(op[iop])));
PyUString_ConcatAndDel(&errmsg,
@@ -1342,7 +1342,7 @@ npyiter_check_casting(int nop, PyArrayObject **op,
PyUString_ConcatAndDel(&errmsg,
PyUString_FromFormat(", the operand %d dtype, "
"according to the rule %s",
- (int)iop,
+ iop,
npyiter_casting_to_string(casting)));
PyErr_SetObject(PyExc_TypeError, errmsg);
Py_DECREF(errmsg);
@@ -1424,7 +1424,7 @@ static int
npyiter_fill_axisdata(NpyIter *iter, npy_uint32 flags, npyiter_opitflags *op_itflags,
char **op_dataptr,
const npy_uint32 *op_flags, int **op_axes,
- npy_intp *itershape)
+ npy_intp const *itershape)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
int idim, ndim = NIT_NDIM(iter);
@@ -1500,8 +1500,8 @@ npyiter_fill_axisdata(NpyIter *iter, npy_uint32 flags, npyiter_opitflags *op_itf
"Iterator input op_axes[%d][%d] (==%d) "
"is not a valid axis of op[%d], which "
"has %d dimensions ",
- (int)iop, (int)(ndim-idim-1), (int)i,
- (int)iop, (int)ondim);
+ iop, (ndim-idim-1), i,
+ iop, ondim);
return 0;
}
}
@@ -2476,7 +2476,7 @@ npyiter_get_common_dtype(int nop, PyArrayObject **op,
static PyArrayObject *
npyiter_new_temp_array(NpyIter *iter, PyTypeObject *subtype,
npy_uint32 flags, npyiter_opitflags *op_itflags,
- int op_ndim, npy_intp *shape,
+ int op_ndim, npy_intp const *shape,
PyArray_Descr *op_dtype, const int *op_axes)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
diff --git a/numpy/core/src/multiarray/nditer_pywrap.c b/numpy/core/src/multiarray/nditer_pywrap.c
index 4b9d41aa4..505c5a841 100644
--- a/numpy/core/src/multiarray/nditer_pywrap.c
+++ b/numpy/core/src/multiarray/nditer_pywrap.c
@@ -17,6 +17,7 @@
#include "npy_pycompat.h"
#include "alloc.h"
#include "common.h"
+#include "conversion_utils.h"
#include "ctors.h"
/* Functions not part of the public NumPy C API */
@@ -748,7 +749,7 @@ npyiter_init(NewNpyArrayIterObject *self, PyObject *args, PyObject *kwds)
int oa_ndim = -1;
int op_axes_arrays[NPY_MAXARGS][NPY_MAXDIMS];
int *op_axes[NPY_MAXARGS];
- PyArray_Dims itershape = {NULL, 0};
+ PyArray_Dims itershape = {NULL, -1};
int buffersize = 0;
if (self->iter != NULL) {
@@ -765,7 +766,7 @@ npyiter_init(NewNpyArrayIterObject *self, PyObject *args, PyObject *kwds)
npyiter_order_converter, &order,
PyArray_CastingConverter, &casting,
&op_axes_in,
- PyArray_IntpConverter, &itershape,
+ PyArray_OptionalIntpConverter, &itershape,
&buffersize)) {
npy_free_cache_dim_obj(itershape);
return -1;
@@ -800,7 +801,7 @@ npyiter_init(NewNpyArrayIterObject *self, PyObject *args, PyObject *kwds)
}
}
- if (itershape.len > 0) {
+ if (itershape.len != -1) {
if (oa_ndim == -1) {
oa_ndim = itershape.len;
memset(op_axes, 0, sizeof(op_axes[0]) * nop);
@@ -812,10 +813,6 @@ npyiter_init(NewNpyArrayIterObject *self, PyObject *args, PyObject *kwds)
goto fail;
}
}
- else if (itershape.ptr != NULL) {
- npy_free_cache_dim_obj(itershape);
- itershape.ptr = NULL;
- }
self->iter = NpyIter_AdvancedNew(nop, op, flags, order, casting, op_flags,
op_request_dtypes,
@@ -2016,7 +2013,7 @@ npyiter_seq_item(NewNpyArrayIterObject *self, Py_ssize_t i)
if (i < 0 || i >= nop) {
PyErr_Format(PyExc_IndexError,
- "Iterator operand index %d is out of bounds", (int)i_orig);
+ "Iterator operand index %zd is out of bounds", i_orig);
return NULL;
}
@@ -2030,7 +2027,7 @@ npyiter_seq_item(NewNpyArrayIterObject *self, Py_ssize_t i)
*/
if (!self->readflags[i]) {
PyErr_Format(PyExc_RuntimeError,
- "Iterator operand %d is write-only", (int)i);
+ "Iterator operand %zd is write-only", i);
return NULL;
}
#endif
@@ -2147,12 +2144,12 @@ npyiter_seq_ass_item(NewNpyArrayIterObject *self, Py_ssize_t i, PyObject *v)
if (i < 0 || i >= nop) {
PyErr_Format(PyExc_IndexError,
- "Iterator operand index %d is out of bounds", (int)i_orig);
+ "Iterator operand index %zd is out of bounds", i_orig);
return -1;
}
if (!self->writeflags[i]) {
PyErr_Format(PyExc_RuntimeError,
- "Iterator operand %d is not writeable", (int)i_orig);
+ "Iterator operand %zd is not writeable", i_orig);
return -1;
}
@@ -2490,61 +2487,17 @@ NPY_NO_EXPORT PyMappingMethods npyiter_as_mapping = {
};
NPY_NO_EXPORT PyTypeObject NpyIter_Type = {
-#if defined(NPY_PY3K)
PyVarObject_HEAD_INIT(NULL, 0)
-#else
- PyObject_HEAD_INIT(NULL)
- 0, /* ob_size */
-#endif
- "numpy.nditer", /* tp_name */
- sizeof(NewNpyArrayIterObject), /* tp_basicsize */
- 0, /* tp_itemsize */
- /* methods */
- (destructor)npyiter_dealloc, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
-#if defined(NPY_PY3K)
- 0, /* tp_reserved */
-#else
- 0, /* tp_compare */
-#endif
- 0, /* tp_repr */
- 0, /* tp_as_number */
- &npyiter_as_sequence, /* tp_as_sequence */
- &npyiter_as_mapping, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- 0, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
- Py_TPFLAGS_DEFAULT, /* tp_flags */
- 0, /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- (iternextfunc)npyiter_next, /* tp_iternext */
- npyiter_methods, /* tp_methods */
- npyiter_members, /* tp_members */
- npyiter_getsets, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- (initproc)npyiter_init, /* tp_init */
- 0, /* tp_alloc */
- npyiter_new, /* tp_new */
- 0, /* tp_free */
- 0, /* tp_is_gc */
- 0, /* tp_bases */
- 0, /* tp_mro */
- 0, /* tp_cache */
- 0, /* tp_subclasses */
- 0, /* tp_weaklist */
- 0, /* tp_del */
- 0, /* tp_version_tag */
+ .tp_name = "numpy.nditer",
+ .tp_basicsize = sizeof(NewNpyArrayIterObject),
+ .tp_dealloc = (destructor)npyiter_dealloc,
+ .tp_as_sequence = &npyiter_as_sequence,
+ .tp_as_mapping = &npyiter_as_mapping,
+ .tp_flags = Py_TPFLAGS_DEFAULT,
+ .tp_iternext = (iternextfunc)npyiter_next,
+ .tp_methods = npyiter_methods,
+ .tp_members = npyiter_members,
+ .tp_getset = npyiter_getsets,
+ .tp_init = (initproc)npyiter_init,
+ .tp_new = npyiter_new,
};
diff --git a/numpy/core/src/multiarray/buffer.h b/numpy/core/src/multiarray/npy_buffer.h
index fae413c85..2eb97c4b9 100644
--- a/numpy/core/src/multiarray/buffer.h
+++ b/numpy/core/src/multiarray/npy_buffer.h
@@ -7,7 +7,7 @@ NPY_NO_EXPORT void
_dealloc_cached_buffer_info(PyObject *self);
NPY_NO_EXPORT PyArray_Descr*
-_descriptor_from_pep3118_format(char *s);
+_descriptor_from_pep3118_format(char const *s);
NPY_NO_EXPORT int
gentype_getbuffer(PyObject *obj, Py_buffer *view, int flags);
diff --git a/numpy/core/src/multiarray/number.c b/numpy/core/src/multiarray/number.c
index dabc866ff..19ac7d7f9 100644
--- a/numpy/core/src/multiarray/number.c
+++ b/numpy/core/src/multiarray/number.c
@@ -32,10 +32,6 @@ static PyObject *
array_inplace_subtract(PyArrayObject *m1, PyObject *m2);
static PyObject *
array_inplace_multiply(PyArrayObject *m1, PyObject *m2);
-#if !defined(NPY_PY3K)
-static PyObject *
-array_inplace_divide(PyArrayObject *m1, PyObject *m2);
-#endif
static PyObject *
array_inplace_true_divide(PyArrayObject *m1, PyObject *m2);
static PyObject *
@@ -61,8 +57,11 @@ array_inplace_power(PyArrayObject *a1, PyObject *o2, PyObject *NPY_UNUSED(modulo
*/
/* FIXME - macro contains a return */
-#define SET(op) temp = PyDict_GetItemString(dict, #op); \
- if (temp != NULL) { \
+#define SET(op) temp = _PyDict_GetItemStringWithError(dict, #op); \
+ if (temp == NULL && PyErr_Occurred()) { \
+ return -1; \
+ } \
+ else if (temp != NULL) { \
if (!(PyCallable_Check(temp))) { \
return -1; \
} \
@@ -353,20 +352,6 @@ array_multiply(PyArrayObject *m1, PyObject *m2)
return PyArray_GenericBinaryFunction(m1, m2, n_ops.multiply);
}
-#if !defined(NPY_PY3K)
-static PyObject *
-array_divide(PyArrayObject *m1, PyObject *m2)
-{
- PyObject *res;
-
- BINOP_GIVE_UP_IF_NEEDED(m1, m2, nb_divide, array_divide);
- if (try_binary_elide(m1, m2, &array_inplace_divide, &res, 0)) {
- return res;
- }
- return PyArray_GenericBinaryFunction(m1, m2, n_ops.divide);
-}
-#endif
-
static PyObject *
array_remainder(PyArrayObject *m1, PyObject *m2)
{
@@ -381,7 +366,6 @@ array_divmod(PyArrayObject *m1, PyObject *m2)
return PyArray_GenericBinaryFunction(m1, m2, n_ops.divmod);
}
-#if PY_VERSION_HEX >= 0x03050000
/* Need this to be version dependent on account of the slot check */
static PyObject *
array_matrix_multiply(PyArrayObject *m1, PyObject *m2)
@@ -399,7 +383,6 @@ array_inplace_matrix_multiply(
"Use 'a = a @ b' instead of 'a @= b'.");
return NULL;
}
-#endif
/*
* Determine if object is a scalar and if so, convert the object
@@ -728,16 +711,6 @@ array_inplace_multiply(PyArrayObject *m1, PyObject *m2)
return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.multiply);
}
-#if !defined(NPY_PY3K)
-static PyObject *
-array_inplace_divide(PyArrayObject *m1, PyObject *m2)
-{
- INPLACE_GIVE_UP_IF_NEEDED(
- m1, m2, nb_inplace_divide, array_inplace_divide);
- return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.divide);
-}
-#endif
-
static PyObject *
array_inplace_remainder(PyArrayObject *m1, PyObject *m2)
{
@@ -931,67 +904,12 @@ array_float(PyArrayObject *v)
return array_scalar_forward(v, &PyNumber_Float, " in ndarray.__float__");
}
-#if defined(NPY_PY3K)
-
NPY_NO_EXPORT PyObject *
array_int(PyArrayObject *v)
{
return array_scalar_forward(v, &PyNumber_Long, " in ndarray.__int__");
}
-#else
-
-NPY_NO_EXPORT PyObject *
-array_int(PyArrayObject *v)
-{
- return array_scalar_forward(v, &PyNumber_Int, " in ndarray.__int__");
-}
-
-NPY_NO_EXPORT PyObject *
-array_long(PyArrayObject *v)
-{
- return array_scalar_forward(v, &PyNumber_Long, " in ndarray.__long__");
-}
-
-/* hex and oct aren't exposed to the C api, but we need a function pointer */
-static PyObject *
-_PyNumber_Oct(PyObject *o) {
- PyObject *res;
- PyObject *mod = PyImport_ImportModule("__builtin__");
- if (mod == NULL) {
- return NULL;
- }
- res = PyObject_CallMethod(mod, "oct", "(O)", o);
- Py_DECREF(mod);
- return res;
-}
-
-static PyObject *
-_PyNumber_Hex(PyObject *o) {
- PyObject *res;
- PyObject *mod = PyImport_ImportModule("__builtin__");
- if (mod == NULL) {
- return NULL;
- }
- res = PyObject_CallMethod(mod, "hex", "(O)", o);
- Py_DECREF(mod);
- return res;
-}
-
-NPY_NO_EXPORT PyObject *
-array_oct(PyArrayObject *v)
-{
- return array_scalar_forward(v, &_PyNumber_Oct, " in ndarray.__oct__");
-}
-
-NPY_NO_EXPORT PyObject *
-array_hex(PyArrayObject *v)
-{
- return array_scalar_forward(v, &_PyNumber_Hex, " in ndarray.__hex__");
-}
-
-#endif
-
static PyObject *
array_index(PyArrayObject *v)
{
@@ -1005,65 +923,43 @@ array_index(PyArrayObject *v)
NPY_NO_EXPORT PyNumberMethods array_as_number = {
- (binaryfunc)array_add, /*nb_add*/
- (binaryfunc)array_subtract, /*nb_subtract*/
- (binaryfunc)array_multiply, /*nb_multiply*/
-#if !defined(NPY_PY3K)
- (binaryfunc)array_divide, /*nb_divide*/
-#endif
- (binaryfunc)array_remainder, /*nb_remainder*/
- (binaryfunc)array_divmod, /*nb_divmod*/
- (ternaryfunc)array_power, /*nb_power*/
- (unaryfunc)array_negative, /*nb_neg*/
- (unaryfunc)array_positive, /*nb_pos*/
- (unaryfunc)array_absolute, /*(unaryfunc)array_abs,*/
- (inquiry)_array_nonzero, /*nb_nonzero*/
- (unaryfunc)array_invert, /*nb_invert*/
- (binaryfunc)array_left_shift, /*nb_lshift*/
- (binaryfunc)array_right_shift, /*nb_rshift*/
- (binaryfunc)array_bitwise_and, /*nb_and*/
- (binaryfunc)array_bitwise_xor, /*nb_xor*/
- (binaryfunc)array_bitwise_or, /*nb_or*/
-#if !defined(NPY_PY3K)
- 0, /*nb_coerce*/
-#endif
- (unaryfunc)array_int, /*nb_int*/
-#if defined(NPY_PY3K)
- 0, /*nb_reserved*/
-#else
- (unaryfunc)array_long, /*nb_long*/
-#endif
- (unaryfunc)array_float, /*nb_float*/
-#if !defined(NPY_PY3K)
- (unaryfunc)array_oct, /*nb_oct*/
- (unaryfunc)array_hex, /*nb_hex*/
-#endif
-
- /*
- * This code adds augmented assignment functionality
- * that was made available in Python 2.0
- */
- (binaryfunc)array_inplace_add, /*nb_inplace_add*/
- (binaryfunc)array_inplace_subtract, /*nb_inplace_subtract*/
- (binaryfunc)array_inplace_multiply, /*nb_inplace_multiply*/
-#if !defined(NPY_PY3K)
- (binaryfunc)array_inplace_divide, /*nb_inplace_divide*/
-#endif
- (binaryfunc)array_inplace_remainder, /*nb_inplace_remainder*/
- (ternaryfunc)array_inplace_power, /*nb_inplace_power*/
- (binaryfunc)array_inplace_left_shift, /*nb_inplace_lshift*/
- (binaryfunc)array_inplace_right_shift, /*nb_inplace_rshift*/
- (binaryfunc)array_inplace_bitwise_and, /*nb_inplace_and*/
- (binaryfunc)array_inplace_bitwise_xor, /*nb_inplace_xor*/
- (binaryfunc)array_inplace_bitwise_or, /*nb_inplace_or*/
-
- (binaryfunc)array_floor_divide, /*nb_floor_divide*/
- (binaryfunc)array_true_divide, /*nb_true_divide*/
- (binaryfunc)array_inplace_floor_divide, /*nb_inplace_floor_divide*/
- (binaryfunc)array_inplace_true_divide, /*nb_inplace_true_divide*/
- (unaryfunc)array_index, /*nb_index */
-#if PY_VERSION_HEX >= 0x03050000
- (binaryfunc)array_matrix_multiply, /*nb_matrix_multiply*/
- (binaryfunc)array_inplace_matrix_multiply, /*nb_inplace_matrix_multiply*/
-#endif
+ .nb_add = (binaryfunc)array_add,
+ .nb_subtract = (binaryfunc)array_subtract,
+ .nb_multiply = (binaryfunc)array_multiply,
+ .nb_remainder = (binaryfunc)array_remainder,
+ .nb_divmod = (binaryfunc)array_divmod,
+ .nb_power = (ternaryfunc)array_power,
+ .nb_negative = (unaryfunc)array_negative,
+ .nb_positive = (unaryfunc)array_positive,
+ .nb_absolute = (unaryfunc)array_absolute,
+ .nb_bool = (inquiry)_array_nonzero,
+ .nb_invert = (unaryfunc)array_invert,
+ .nb_lshift = (binaryfunc)array_left_shift,
+ .nb_rshift = (binaryfunc)array_right_shift,
+ .nb_and = (binaryfunc)array_bitwise_and,
+ .nb_xor = (binaryfunc)array_bitwise_xor,
+ .nb_or = (binaryfunc)array_bitwise_or,
+
+ .nb_int = (unaryfunc)array_int,
+ .nb_float = (unaryfunc)array_float,
+ .nb_index = (unaryfunc)array_index,
+
+ .nb_inplace_add = (binaryfunc)array_inplace_add,
+ .nb_inplace_subtract = (binaryfunc)array_inplace_subtract,
+ .nb_inplace_multiply = (binaryfunc)array_inplace_multiply,
+ .nb_inplace_remainder = (binaryfunc)array_inplace_remainder,
+ .nb_inplace_power = (ternaryfunc)array_inplace_power,
+ .nb_inplace_lshift = (binaryfunc)array_inplace_left_shift,
+ .nb_inplace_rshift = (binaryfunc)array_inplace_right_shift,
+ .nb_inplace_and = (binaryfunc)array_inplace_bitwise_and,
+ .nb_inplace_xor = (binaryfunc)array_inplace_bitwise_xor,
+ .nb_inplace_or = (binaryfunc)array_inplace_bitwise_or,
+
+ .nb_floor_divide = (binaryfunc)array_floor_divide,
+ .nb_true_divide = (binaryfunc)array_true_divide,
+ .nb_inplace_floor_divide = (binaryfunc)array_inplace_floor_divide,
+ .nb_inplace_true_divide = (binaryfunc)array_inplace_true_divide,
+
+ .nb_matrix_multiply = (binaryfunc)array_matrix_multiply,
+ .nb_inplace_matrix_multiply = (binaryfunc)array_inplace_matrix_multiply,
};
diff --git a/numpy/core/src/multiarray/refcount.c b/numpy/core/src/multiarray/refcount.c
index 6033929d9..c869b5eea 100644
--- a/numpy/core/src/multiarray/refcount.c
+++ b/numpy/core/src/multiarray/refcount.c
@@ -46,7 +46,7 @@ PyArray_Item_INCREF(char *data, PyArray_Descr *descr)
Py_ssize_t pos = 0;
while (PyDict_Next(descr->fields, &pos, &key, &value)) {
- if NPY_TITLE_KEY(key, value) {
+ if (NPY_TITLE_KEY(key, value)) {
continue;
}
if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset,
@@ -108,7 +108,7 @@ PyArray_Item_XDECREF(char *data, PyArray_Descr *descr)
Py_ssize_t pos = 0;
while (PyDict_Next(descr->fields, &pos, &key, &value)) {
- if NPY_TITLE_KEY(key, value) {
+ if (NPY_TITLE_KEY(key, value)) {
continue;
}
if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset,
@@ -318,7 +318,7 @@ _fillobject(char *optr, PyObject *obj, PyArray_Descr *dtype)
Py_ssize_t pos = 0;
while (PyDict_Next(dtype->fields, &pos, &key, &value)) {
- if NPY_TITLE_KEY(key, value) {
+ if (NPY_TITLE_KEY(key, value)) {
continue;
}
if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, &title)) {
diff --git a/numpy/core/src/multiarray/scalarapi.c b/numpy/core/src/multiarray/scalarapi.c
index b669a3e76..8a7139fb2 100644
--- a/numpy/core/src/multiarray/scalarapi.c
+++ b/numpy/core/src/multiarray/scalarapi.c
@@ -45,7 +45,7 @@ scalar_value(PyObject *scalar, PyArray_Descr *descr)
type_num = descr->type_num;
}
switch (type_num) {
-#define CASE(ut,lt) case NPY_##ut: return &(((Py##lt##ScalarObject *)scalar)->obval)
+#define CASE(ut,lt) case NPY_##ut: return &PyArrayScalar_VAL(scalar, lt)
CASE(BOOL, Bool);
CASE(BYTE, Byte);
CASE(UBYTE, UByte);
@@ -71,9 +71,19 @@ scalar_value(PyObject *scalar, PyArray_Descr *descr)
case NPY_STRING:
return (void *)PyString_AS_STRING(scalar);
case NPY_UNICODE:
- return (void *)PyUnicode_AS_DATA(scalar);
+ /* lazy initialization, to reduce the memory used by string scalars */
+ if (PyArrayScalar_VAL(scalar, Unicode) == NULL) {
+ Py_UCS4 *raw_data = PyUnicode_AsUCS4Copy(scalar);
+ if (raw_data == NULL) {
+ return NULL;
+ }
+ PyArrayScalar_VAL(scalar, Unicode) = raw_data;
+ return (void *)raw_data;
+ }
+ return PyArrayScalar_VAL(scalar, Unicode);
case NPY_VOID:
- return ((PyVoidScalarObject *)scalar)->obval;
+ /* Note: no & needed here, so can't use CASE */
+ return PyArrayScalar_VAL(scalar, Void);
}
/*
@@ -81,14 +91,13 @@ scalar_value(PyObject *scalar, PyArray_Descr *descr)
* scalar it inherits from.
*/
-#define _CHK(cls) (PyObject_IsInstance(scalar, \
- (PyObject *)&Py##cls##ArrType_Type))
-#define _OBJ(lt) &(((Py##lt##ScalarObject *)scalar)->obval)
-#define _IFCASE(cls) if _CHK(cls) return _OBJ(cls)
+#define _CHK(cls) PyObject_IsInstance(scalar, \
+ (PyObject *)&Py##cls##ArrType_Type)
+#define _IFCASE(cls) if (_CHK(cls)) return &PyArrayScalar_VAL(scalar, cls)
- if _CHK(Number) {
- if _CHK(Integer) {
- if _CHK(SignedInteger) {
+ if (_CHK(Number)) {
+ if (_CHK(Integer)) {
+ if (_CHK(SignedInteger)) {
_IFCASE(Byte);
_IFCASE(Short);
_IFCASE(Int);
@@ -107,7 +116,7 @@ scalar_value(PyObject *scalar, PyArray_Descr *descr)
}
else {
/* Inexact */
- if _CHK(Floating) {
+ if (_CHK(Floating)) {
_IFCASE(Half);
_IFCASE(Float);
_IFCASE(Double);
@@ -122,10 +131,10 @@ scalar_value(PyObject *scalar, PyArray_Descr *descr)
}
}
else if (_CHK(Bool)) {
- return _OBJ(Bool);
+ return &PyArrayScalar_VAL(scalar, Bool);
}
else if (_CHK(Datetime)) {
- return _OBJ(Datetime);
+ return &PyArrayScalar_VAL(scalar, Datetime);
}
else if (_CHK(Flexible)) {
if (_CHK(String)) {
@@ -135,7 +144,8 @@ scalar_value(PyObject *scalar, PyArray_Descr *descr)
return (void *)PyUnicode_AS_DATA(scalar);
}
if (_CHK(Void)) {
- return ((PyVoidScalarObject *)scalar)->obval;
+ /* Note: no & needed here, so can't use _IFCASE */
+ return PyArrayScalar_VAL(scalar, Void);
}
}
else {
@@ -156,7 +166,6 @@ scalar_value(PyObject *scalar, PyArray_Descr *descr)
}
return (void *)memloc;
#undef _IFCASE
-#undef _OBJ
#undef _CHK
}
@@ -319,21 +328,10 @@ PyArray_FromScalar(PyObject *scalar, PyArray_Descr *outcode)
memptr = scalar_value(scalar, typecode);
-#ifndef Py_UNICODE_WIDE
- if (typecode->type_num == NPY_UNICODE) {
- PyUCS2Buffer_AsUCS4((Py_UNICODE *)memptr,
- (npy_ucs4 *)PyArray_DATA(r),
- PyUnicode_GET_SIZE(scalar),
- PyArray_ITEMSIZE(r) >> 2);
- }
- else
-#endif
- {
- memcpy(PyArray_DATA(r), memptr, PyArray_ITEMSIZE(r));
- if (PyDataType_FLAGCHK(typecode, NPY_ITEM_HASOBJECT)) {
- /* Need to INCREF just the PyObject portion */
- PyArray_Item_INCREF(memptr, typecode);
- }
+ memcpy(PyArray_DATA(r), memptr, PyArray_ITEMSIZE(r));
+ if (PyDataType_FLAGCHK(typecode, NPY_ITEM_HASOBJECT)) {
+ /* Need to INCREF just the PyObject portion */
+ PyArray_Item_INCREF(memptr, typecode);
}
finish:
@@ -425,37 +423,69 @@ PyArray_ScalarFromObject(PyObject *object)
NPY_NO_EXPORT PyArray_Descr *
PyArray_DescrFromTypeObject(PyObject *type)
{
- int typenum;
- PyArray_Descr *new, *conv = NULL;
-
/* if it's a builtin type, then use the typenumber */
- typenum = _typenum_fromtypeobj(type,1);
+ int typenum = _typenum_fromtypeobj(type,1);
if (typenum != NPY_NOTYPE) {
- new = PyArray_DescrFromType(typenum);
- return new;
+ return PyArray_DescrFromType(typenum);
}
/* Check the generic types */
if ((type == (PyObject *) &PyNumberArrType_Type) ||
(type == (PyObject *) &PyInexactArrType_Type) ||
(type == (PyObject *) &PyFloatingArrType_Type)) {
+ if (DEPRECATE("Converting `np.inexact` or `np.floating` to "
+ "a dtype is deprecated. The current result is `float64` "
+ "which is not strictly correct.") < 0) {
+ return NULL;
+ }
typenum = NPY_DOUBLE;
}
else if (type == (PyObject *)&PyComplexFloatingArrType_Type) {
+ if (DEPRECATE("Converting `np.complex` to a dtype is deprecated. "
+ "The current result is `complex128` which is not "
+ "strictly correct.") < 0) {
+ return NULL;
+ }
typenum = NPY_CDOUBLE;
}
else if ((type == (PyObject *)&PyIntegerArrType_Type) ||
(type == (PyObject *)&PySignedIntegerArrType_Type)) {
+ if (DEPRECATE("Converting `np.integer` or `np.signedinteger` to "
+ "a dtype is deprecated. The current result is "
+ "`np.dtype(np.int_)` which is not strictly correct. "
+ "Note that the result depends on the system. To ensure "
+ "stable results you may want to use `np.int64` or "
+ "`np.int32`.") < 0) {
+ return NULL;
+ }
typenum = NPY_LONG;
}
else if (type == (PyObject *) &PyUnsignedIntegerArrType_Type) {
+ if (DEPRECATE("Converting `np.unsignedinteger` to a dtype is "
+ "deprecated. The current result is `np.dtype(np.uint)` "
+ "which is not strictly correct. Note that the result "
+ "depends on the system. To ensure stable results you may "
+ "want to use `np.uint64` or `np.uint32`.") < 0) {
+ return NULL;
+ }
typenum = NPY_ULONG;
}
else if (type == (PyObject *) &PyCharacterArrType_Type) {
+ if (DEPRECATE("Converting `np.character` to a dtype is deprecated. "
+ "The current result is `np.dtype(np.str_)` "
+ "which is not strictly correct. Note that `np.character` "
+ "is generally deprecated and 'S1' should be used.") < 0) {
+ return NULL;
+ }
typenum = NPY_STRING;
}
else if ((type == (PyObject *) &PyGenericArrType_Type) ||
(type == (PyObject *) &PyFlexibleArrType_Type)) {
+ if (DEPRECATE("Converting `np.generic` to a dtype is "
+ "deprecated. The current result is `np.dtype(np.void)` "
+ "which is not strictly correct.") < 0) {
+ return NULL;
+ }
typenum = NPY_VOID;
}
@@ -470,11 +500,12 @@ PyArray_DescrFromTypeObject(PyObject *type)
/* Do special thing for VOID sub-types */
if (PyType_IsSubtype((PyTypeObject *)type, &PyVoidArrType_Type)) {
- new = PyArray_DescrNewFromType(NPY_VOID);
+ PyArray_Descr *new = PyArray_DescrNewFromType(NPY_VOID);
if (new == NULL) {
return NULL;
}
- if (_arraydescr_from_dtype_attr(type, &conv)) {
+ PyArray_Descr *conv = _arraydescr_try_convert_from_dtype_attr(type);
+ if ((PyObject *)conv != Py_NotImplemented) {
if (conv == NULL) {
Py_DECREF(new);
return NULL;
@@ -486,8 +517,8 @@ PyArray_DescrFromTypeObject(PyObject *type)
new->elsize = conv->elsize;
new->subarray = conv->subarray;
conv->subarray = NULL;
- Py_DECREF(conv);
}
+ Py_DECREF(conv);
Py_XDECREF(new->typeobj);
new->typeobj = (PyTypeObject *)type;
Py_INCREF(type);
@@ -564,6 +595,9 @@ PyArray_DescrFromScalar(PyObject *sc)
}
descr = PyArray_DescrFromTypeObject((PyObject *)Py_TYPE(sc));
+ if (descr == NULL) {
+ return NULL;
+ }
if (PyDataType_ISUNSIZED(descr)) {
PyArray_DESCR_REPLACE(descr);
type_num = descr->type_num;
@@ -571,10 +605,7 @@ PyArray_DescrFromScalar(PyObject *sc)
descr->elsize = PyString_GET_SIZE(sc);
}
else if (type_num == NPY_UNICODE) {
- descr->elsize = PyUnicode_GET_DATA_SIZE(sc);
-#ifndef Py_UNICODE_WIDE
- descr->elsize <<= 1;
-#endif
+ descr->elsize = PyUnicode_GET_LENGTH(sc) * 4;
}
else {
PyArray_Descr *dtype;
@@ -656,25 +687,31 @@ PyArray_Scalar(void *data, PyArray_Descr *descr, PyObject *base)
itemsize = (((itemsize - 1) >> 2) + 1) << 2;
}
}
-#if PY_VERSION_HEX >= 0x03030000
if (type_num == NPY_UNICODE) {
- PyObject *u, *args;
- int byteorder;
-
-#if NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN
- byteorder = -1;
-#elif NPY_BYTE_ORDER == NPY_BIG_ENDIAN
- byteorder = +1;
-#else
- #error Endianness undefined ?
-#endif
- if (swap) byteorder *= -1;
-
- u = PyUnicode_DecodeUTF32(data, itemsize, NULL, &byteorder);
+ /* we need the full string length here, else copyswap will write too
+ many bytes */
+ void *buff = PyArray_malloc(descr->elsize);
+ if (buff == NULL) {
+ return PyErr_NoMemory();
+ }
+ /* copyswap needs an array object, but only actually cares about the
+ * dtype
+ */
+ PyArrayObject_fields dummy_arr;
+ if (base == NULL) {
+ dummy_arr.descr = descr;
+ base = (PyObject *)&dummy_arr;
+ }
+ copyswap(buff, data, swap, base);
+
+ /* truncation occurs here */
+ PyObject *u = PyUnicode_FromKindAndData(PyUnicode_4BYTE_KIND, buff, itemsize / 4);
+ PyArray_free(buff);
if (u == NULL) {
return NULL;
}
- args = Py_BuildValue("(O)", u);
+
+ PyObject *args = Py_BuildValue("(O)", u);
if (args == NULL) {
Py_DECREF(u);
return NULL;
@@ -684,7 +721,6 @@ PyArray_Scalar(void *data, PyArray_Descr *descr, PyObject *base)
Py_DECREF(args);
return obj;
}
-#endif
if (type->tp_itemsize != 0) {
/* String type */
obj = type->tp_alloc(type, itemsize);
@@ -710,85 +746,9 @@ PyArray_Scalar(void *data, PyArray_Descr *descr, PyObject *base)
if (type_num == NPY_STRING) {
destptr = PyString_AS_STRING(obj);
((PyStringObject *)obj)->ob_shash = -1;
-#if !defined(NPY_PY3K)
- ((PyStringObject *)obj)->ob_sstate = SSTATE_NOT_INTERNED;
-#endif
memcpy(destptr, data, itemsize);
return obj;
}
-#if PY_VERSION_HEX < 0x03030000
- else if (type_num == NPY_UNICODE) {
- /* tp_alloc inherited from Python PyBaseObject_Type */
- PyUnicodeObject *uni = (PyUnicodeObject*)obj;
- size_t length = itemsize >> 2;
- Py_UNICODE *dst;
-#ifndef Py_UNICODE_WIDE
- char *buffer;
- Py_UNICODE *tmp;
- int alloc = 0;
-
- length *= 2;
-#endif
- /* Set uni->str so that object can be deallocated on failure */
- uni->str = NULL;
- uni->defenc = NULL;
- uni->hash = -1;
- dst = PyObject_MALLOC(sizeof(Py_UNICODE) * (length + 1));
- if (dst == NULL) {
- Py_DECREF(obj);
- PyErr_NoMemory();
- return NULL;
- }
-#ifdef Py_UNICODE_WIDE
- memcpy(dst, data, itemsize);
- if (swap) {
- byte_swap_vector(dst, length, 4);
- }
- uni->str = dst;
- uni->str[length] = 0;
- uni->length = length;
-#else
- /* need aligned data buffer */
- if ((swap) || ((((npy_intp)data) % descr->alignment) != 0)) {
- buffer = malloc(itemsize);
- if (buffer == NULL) {
- PyObject_FREE(dst);
- Py_DECREF(obj);
- PyErr_NoMemory();
- }
- alloc = 1;
- memcpy(buffer, data, itemsize);
- if (swap) {
- byte_swap_vector(buffer, itemsize >> 2, 4);
- }
- }
- else {
- buffer = data;
- }
-
- /*
- * Allocated enough for 2-characters per itemsize.
- * Now convert from the data-buffer
- */
- length = PyUCS2Buffer_FromUCS4(dst,
- (npy_ucs4 *)buffer, itemsize >> 2);
- if (alloc) {
- free(buffer);
- }
- /* Resize the unicode result */
- tmp = PyObject_REALLOC(dst, sizeof(Py_UNICODE)*(length + 1));
- if (tmp == NULL) {
- PyObject_FREE(dst);
- Py_DECREF(obj);
- return NULL;
- }
- uni->str = tmp;
- uni->str[length] = 0;
- uni->length = length;
-#endif
- return obj;
- }
-#endif /* PY_VERSION_HEX < 0x03030000 */
else {
PyVoidScalarObject *vobj = (PyVoidScalarObject *)obj;
vobj->base = NULL;
diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src
index 9adca6773..2f1767391 100644
--- a/numpy/core/src/multiarray/scalartypes.c.src
+++ b/numpy/core/src/multiarray/scalartypes.c.src
@@ -28,7 +28,7 @@
#include "npy_import.h"
#include "dragon4.h"
#include "npy_longdouble.h"
-#include "buffer.h"
+#include "npy_buffer.h"
#include <stdlib.h>
@@ -54,63 +54,9 @@ NPY_NO_EXPORT PyTypeObject PyTimeIntegerArrType_Type;
* Floating, ComplexFloating, Flexible, Character#
*/
NPY_NO_EXPORT PyTypeObject Py@NAME@ArrType_Type = {
-#if defined(NPY_PY3K)
PyVarObject_HEAD_INIT(NULL, 0)
-#else
- PyObject_HEAD_INIT(NULL)
- 0, /* ob_size */
-#endif
- "numpy.@name@", /* tp_name*/
- sizeof(PyObject), /* tp_basicsize*/
- 0, /* tp_itemsize */
- /* methods */
- 0, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
-#if defined(NPY_PY3K)
- 0, /* tp_reserved */
-#else
- 0, /* tp_compare */
-#endif
- 0, /* tp_repr */
- 0, /* tp_as_number */
- 0, /* tp_as_sequence */
- 0, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- 0, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
- 0, /* tp_flags */
- 0, /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
- 0, /* tp_methods */
- 0, /* tp_members */
- 0, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- 0, /* tp_init */
- 0, /* tp_alloc */
- 0, /* tp_new */
- 0, /* tp_free */
- 0, /* tp_is_gc */
- 0, /* tp_bases */
- 0, /* tp_mro */
- 0, /* tp_cache */
- 0, /* tp_subclasses */
- 0, /* tp_weaklist */
- 0, /* tp_del */
- 0, /* tp_version_tag */
+ .tp_name = "numpy.@name@",
+ .tp_basicsize = sizeof(PyObject),
};
/**end repeat**/
@@ -226,20 +172,6 @@ gentype_@name@(PyObject *m1, PyObject *m2)
/**end repeat**/
-#if !defined(NPY_PY3K)
-/**begin repeat
- *
- * #name = divide#
- */
-static PyObject *
-gentype_@name@(PyObject *m1, PyObject *m2)
-{
- BINOP_GIVE_UP_IF_NEEDED(m1, m2, nb_@name@, gentype_@name@);
- return PyArray_Type.tp_as_number->nb_@name@(m1, m2);
-}
-/**end repeat**/
-#endif
-
/* Get a nested slot, or NULL if absent */
#define GET_NESTED_SLOT(type, group, slot) \
((type)->group == NULL ? NULL : (type)->group->slot)
@@ -293,27 +225,6 @@ gentype_@name@(PyObject *m1)
}
/**end repeat**/
-#if !defined(NPY_PY3K)
-/**begin repeat
- *
- * #name = long, oct, hex#
- */
-static PyObject *
-gentype_@name@(PyObject *m1)
-{
- PyObject *arr, *ret;
-
- arr = PyArray_FromScalar(m1, NULL);
- if (arr == NULL) {
- return NULL;
- }
- ret = Py_TYPE(arr)->tp_as_number->nb_@name@(arr);
- Py_DECREF(arr);
- return ret;
-}
-/**end repeat**/
-#endif
-
static int
gentype_nonzero_number(PyObject *m1)
{
@@ -324,11 +235,7 @@ gentype_nonzero_number(PyObject *m1)
if (arr == NULL) {
return -1;
}
-#if defined(NPY_PY3K)
ret = Py_TYPE(arr)->tp_as_number->nb_bool(arr);
-#else
- ret = Py_TYPE(arr)->tp_as_number->nb_nonzero(arr);
-#endif
Py_DECREF(arr);
return ret;
}
@@ -356,21 +263,9 @@ gentype_format(PyObject *self, PyObject *args)
PyObject *format_spec;
PyObject *obj, *ret;
-#if defined(NPY_PY3K)
if (!PyArg_ParseTuple(args, "U:__format__", &format_spec)) {
return NULL;
}
-#else
- if (!PyArg_ParseTuple(args, "O:__format__", &format_spec)) {
- return NULL;
- }
-
- if (!PyUnicode_Check(format_spec) && !PyString_Check(format_spec)) {
- PyErr_SetString(PyExc_TypeError,
- "format must be a string");
- return NULL;
- }
-#endif
/*
* Convert to an appropriate Python type and call its format.
@@ -378,14 +273,10 @@ gentype_format(PyObject *self, PyObject *args)
* because it throws away precision.
*/
if (Py_TYPE(self) == &PyBoolArrType_Type) {
- obj = PyBool_FromLong(((PyBoolScalarObject *)self)->obval);
+ obj = PyBool_FromLong(PyArrayScalar_VAL(self, Bool));
}
else if (PyArray_IsScalar(self, Integer)) {
-#if defined(NPY_PY3K)
obj = Py_TYPE(self)->tp_as_number->nb_int(self);
-#else
- obj = Py_TYPE(self)->tp_as_number->nb_long(self);
-#endif
}
else if (PyArray_IsScalar(self, Floating)) {
obj = Py_TYPE(self)->tp_as_number->nb_float(self);
@@ -454,6 +345,10 @@ format_@name@(@type@ val, npy_bool scientific,
* over-ride repr and str of array-scalar strings and unicode to
* remove NULL bytes and then call the corresponding functions
* of string and unicode.
+ *
+ * FIXME:
+ * is this really a good idea?
+ * stop using Py_UNICODE here.
*/
/**begin repeat
@@ -967,7 +862,7 @@ static PyObject *
static PyObject *
@name@type_@kind@(PyObject *self)
{
- return @name@type_@kind@_either(((Py@Name@ScalarObject *)self)->obval,
+ return @name@type_@kind@_either(PyArrayScalar_VAL(self, @Name@),
TrimMode_LeaveOneZero, TrimMode_DptZeros, 0);
}
@@ -975,7 +870,7 @@ static PyObject *
c@name@type_@kind@(PyObject *self)
{
PyObject *rstr, *istr, *ret;
- npy_c@name@ val = ((PyC@Name@ScalarObject *)self)->obval;
+ npy_c@name@ val = PyArrayScalar_VAL(self, C@Name@);
TrimMode trim = TrimMode_DptZeros;
if (npy_legacy_print_mode == 113) {
@@ -1039,7 +934,7 @@ c@name@type_@kind@(PyObject *self)
static PyObject *
halftype_@kind@(PyObject *self)
{
- npy_half val = ((PyHalfScalarObject *)self)->obval;
+ npy_half val = PyArrayScalar_VAL(self, Half);
float floatval = npy_half_to_float(val);
float absval;
@@ -1077,85 +972,29 @@ static PyObject *
return npy_longdouble_to_PyLong(val);
}
-#if !defined(NPY_PY3K)
-
-/**begin repeat1
- * #name = int, hex, oct#
- */
-static PyObject *
-@char@longdoubletype_@name@(PyObject *self)
-{
- PyObject *ret;
- PyObject *obj = @char@longdoubletype_long(self);
- if (obj == NULL) {
- return NULL;
- }
- ret = Py_TYPE(obj)->tp_as_number->nb_@name@(obj);
- Py_DECREF(obj);
- return ret;
-}
-/**end repeat1**/
-
-#endif /* !defined(NPY_PY3K) */
-
/**end repeat**/
static PyNumberMethods gentype_as_number = {
- (binaryfunc)gentype_add, /*nb_add*/
- (binaryfunc)gentype_subtract, /*nb_subtract*/
- (binaryfunc)gentype_multiply, /*nb_multiply*/
-#if !defined(NPY_PY3K)
- (binaryfunc)gentype_divide, /*nb_divide*/
-#endif
- (binaryfunc)gentype_remainder, /*nb_remainder*/
- (binaryfunc)gentype_divmod, /*nb_divmod*/
- (ternaryfunc)gentype_power, /*nb_power*/
- (unaryfunc)gentype_negative,
- (unaryfunc)gentype_positive, /*nb_pos*/
- (unaryfunc)gentype_absolute, /*(unaryfunc)gentype_abs,*/
- (inquiry)gentype_nonzero_number, /*nb_nonzero*/
- (unaryfunc)gentype_invert, /*nb_invert*/
- (binaryfunc)gentype_lshift, /*nb_lshift*/
- (binaryfunc)gentype_rshift, /*nb_rshift*/
- (binaryfunc)gentype_and, /*nb_and*/
- (binaryfunc)gentype_xor, /*nb_xor*/
- (binaryfunc)gentype_or, /*nb_or*/
-#if !defined(NPY_PY3K)
- 0, /*nb_coerce*/
-#endif
- (unaryfunc)gentype_int, /*nb_int*/
-#if defined(NPY_PY3K)
- 0, /*nb_reserved*/
-#else
- (unaryfunc)gentype_long, /*nb_long*/
-#endif
- (unaryfunc)gentype_float, /*nb_float*/
-#if !defined(NPY_PY3K)
- (unaryfunc)gentype_oct, /*nb_oct*/
- (unaryfunc)gentype_hex, /*nb_hex*/
-#endif
- 0, /*inplace_add*/
- 0, /*inplace_subtract*/
- 0, /*inplace_multiply*/
-#if !defined(NPY_PY3K)
- 0, /*inplace_divide*/
-#endif
- 0, /*inplace_remainder*/
- 0, /*inplace_power*/
- 0, /*inplace_lshift*/
- 0, /*inplace_rshift*/
- 0, /*inplace_and*/
- 0, /*inplace_xor*/
- 0, /*inplace_or*/
- (binaryfunc)gentype_floor_divide, /*nb_floor_divide*/
- (binaryfunc)gentype_true_divide, /*nb_true_divide*/
- 0, /*nb_inplace_floor_divide*/
- 0, /*nb_inplace_true_divide*/
- (unaryfunc)NULL, /*nb_index*/
-#if PY_VERSION_HEX >= 0x03050000
- 0, /*np_matmul*/
- 0, /*np_inplace_matmul*/
-#endif
+ .nb_add = (binaryfunc)gentype_add,
+ .nb_subtract = (binaryfunc)gentype_subtract,
+ .nb_multiply = (binaryfunc)gentype_multiply,
+ .nb_remainder = (binaryfunc)gentype_remainder,
+ .nb_divmod = (binaryfunc)gentype_divmod,
+ .nb_power = (ternaryfunc)gentype_power,
+ .nb_negative = (unaryfunc)gentype_negative,
+ .nb_positive = (unaryfunc)gentype_positive,
+ .nb_absolute = (unaryfunc)gentype_absolute,
+ .nb_bool = (inquiry)gentype_nonzero_number,
+ .nb_invert = (unaryfunc)gentype_invert,
+ .nb_lshift = (binaryfunc)gentype_lshift,
+ .nb_rshift = (binaryfunc)gentype_rshift,
+ .nb_and = (binaryfunc)gentype_and,
+ .nb_xor = (binaryfunc)gentype_xor,
+ .nb_or = (binaryfunc)gentype_or,
+ .nb_int = (unaryfunc)gentype_int,
+ .nb_float = (unaryfunc)gentype_float,
+ .nb_floor_divide = (binaryfunc)gentype_floor_divide,
+ .nb_true_divide = (binaryfunc)gentype_true_divide,
};
@@ -1246,11 +1085,7 @@ inttype_denominator_get(PyObject *self)
static PyObject *
gentype_data_get(PyObject *self)
{
-#if defined(NPY_PY3K)
return PyMemoryView_FromObject(self);
-#else
- return PyBuffer_FromObject(self, 0, Py_END_OF_BUFFER);
-#endif
}
@@ -1263,11 +1098,6 @@ gentype_itemsize_get(PyObject *self)
typecode = PyArray_DescrFromScalar(self);
elsize = typecode->elsize;
-#ifndef Py_UNICODE_WIDE
- if (typecode->type_num == NPY_UNICODE) {
- elsize >>= 1;
- }
-#endif
ret = PyInt_FromLong((long) elsize);
Py_DECREF(typecode);
return ret;
@@ -1293,7 +1123,6 @@ gentype_sizeof(PyObject *self)
return PyLong_FromSsize_t(nbytes);
}
-#if PY_VERSION_HEX >= 0x03000000
NPY_NO_EXPORT void
gentype_struct_free(PyObject *ptr)
{
@@ -1307,17 +1136,6 @@ gentype_struct_free(PyObject *ptr)
PyArray_free(arrif->shape);
PyArray_free(arrif);
}
-#else
-NPY_NO_EXPORT void
-gentype_struct_free(void *ptr, void *arg)
-{
- PyArrayInterface *arrif = (PyArrayInterface *)ptr;
- Py_DECREF((PyObject *)arg);
- Py_XDECREF(arrif->descr);
- PyArray_free(arrif->shape);
- PyArray_free(arrif);
-}
-#endif
static PyObject *
gentype_struct_get(PyObject *self)
@@ -1438,7 +1256,7 @@ gentype_real_get(PyObject *self)
return ret;
}
else if (PyArray_IsScalar(self, Object)) {
- PyObject *obj = ((PyObjectScalarObject *)self)->obval;
+ PyObject *obj = PyArrayScalar_VAL(self, Object);
ret = PyObject_GetAttrString(obj, "real");
if (ret != NULL) {
return ret;
@@ -1463,7 +1281,7 @@ gentype_imag_get(PyObject *self)
ret = PyArray_Scalar(ptr + typecode->elsize, typecode, NULL);
}
else if (PyArray_IsScalar(self, Object)) {
- PyObject *obj = ((PyObjectScalarObject *)self)->obval;
+ PyObject *obj = PyArrayScalar_VAL(self, Object);
PyArray_Descr *newtype;
ret = PyObject_GetAttrString(obj, "imag");
if (ret == NULL) {
@@ -1671,9 +1489,6 @@ gentype_itemset(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args))
return NULL;
}
-static Py_ssize_t
-gentype_getreadbuf(PyObject *, Py_ssize_t, void **);
-
static PyObject *
gentype_byteswap(PyObject *self, PyObject *args, PyObject *kwds)
{
@@ -1696,8 +1511,9 @@ gentype_byteswap(PyObject *self, PyObject *args, PyObject *kwds)
PyObject *new;
char *newmem;
- gentype_getreadbuf(self, 0, (void **)&data);
descr = PyArray_DescrFromScalar(self);
+ data = (void *)scalar_value(self, descr);
+
newmem = PyObject_Malloc(descr->elsize);
if (newmem == NULL) {
Py_DECREF(descr);
@@ -1733,6 +1549,58 @@ gentype_@name@(PyObject *self, PyObject *args, PyObject *kwds)
}
/**end repeat**/
+
+/**begin repeat
+ * #name = integer, floating, complexfloating#
+ * #complex = 0, 0, 1#
+ */
+static PyObject *
+@name@type_dunder_round(PyObject *self, PyObject *args, PyObject *kwds)
+{
+ static char *kwlist[] = {"ndigits", NULL};
+ PyObject *ndigits = Py_None;
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O:__round__", kwlist, &ndigits)) {
+ return NULL;
+ }
+
+#if @complex@
+ if (DEPRECATE("The Python built-in `round` is deprecated for complex "
+ "scalars, and will raise a `TypeError` in a future release. "
+ "Use `np.round` or `scalar.round` instead.") < 0) {
+ return NULL;
+ }
+#endif
+
+ PyObject *tup;
+ if (ndigits == Py_None) {
+ tup = PyTuple_Pack(0);
+ }
+ else {
+ tup = PyTuple_Pack(1, ndigits);
+ }
+
+ if (tup == NULL) {
+ return NULL;
+ }
+
+ PyObject *obj = gentype_round(self, tup, NULL);
+ Py_DECREF(tup);
+ if (obj == NULL) {
+ return NULL;
+ }
+
+#if !@complex@
+ if (ndigits == Py_None) {
+ PyObject *ret = PyNumber_Long(obj);
+ Py_DECREF(obj);
+ return ret;
+ }
+#endif
+
+ return obj;
+}
+/**end repeat**/
+
static PyObject *
voidtype_getfield(PyVoidScalarObject *self, PyObject *args, PyObject *kwds)
{
@@ -1831,9 +1699,7 @@ static PyObject *
gentype_reduce(PyObject *self, PyObject *NPY_UNUSED(args))
{
PyObject *ret = NULL, *obj = NULL, *mod = NULL;
-#if defined(NPY_PY3K)
Py_buffer view;
-#endif
const char *buffer;
Py_ssize_t buflen;
@@ -1843,13 +1709,7 @@ gentype_reduce(PyObject *self, PyObject *NPY_UNUSED(args))
return NULL;
}
-#if defined(NPY_PY3K)
- if (PyArray_IsScalar(self, Unicode)) {
- /* Unicode on Python 3 does not expose the buffer interface */
- buffer = PyUnicode_AS_DATA(self);
- buflen = PyUnicode_GET_DATA_SIZE(self);
- }
- else if (PyObject_GetBuffer(self, &view, PyBUF_SIMPLE) >= 0) {
+ if (PyObject_GetBuffer(self, &view, PyBUF_SIMPLE) >= 0) {
buffer = view.buf;
buflen = view.len;
/*
@@ -1865,12 +1725,6 @@ gentype_reduce(PyObject *self, PyObject *NPY_UNUSED(args))
Py_DECREF(ret);
return NULL;
}
-#else
- if (PyObject_AsReadBuffer(self, (const void **)&buffer, &buflen)<0) {
- Py_DECREF(ret);
- return NULL;
- }
-#endif
mod = PyImport_ImportModule("numpy.core._multiarray_umath");
if (mod == NULL) {
@@ -1884,52 +1738,39 @@ gentype_reduce(PyObject *self, PyObject *NPY_UNUSED(args))
PyTuple_SET_ITEM(ret, 0, obj);
obj = PyObject_GetAttrString((PyObject *)self, "dtype");
if (PyArray_IsScalar(self, Object)) {
- mod = ((PyObjectScalarObject *)self)->obval;
- PyTuple_SET_ITEM(ret, 1, Py_BuildValue("NO", obj, mod));
+ PyObject *val = PyArrayScalar_VAL(self, Object);
+ PyObject *tup = Py_BuildValue("NO", obj, val);
+ if (tup == NULL) {
+ return NULL;
+ }
+ PyTuple_SET_ITEM(ret, 1, tup);
}
- else {
-#ifndef Py_UNICODE_WIDE
- /*
- * We need to expand the buffer so that we always write
- * UCS4 to disk for pickle of unicode scalars.
- *
- * This could be in a unicode_reduce function, but
- * that would require re-factoring.
- */
- int alloc = 0;
- char *tmp;
- int newlen;
-
- if (PyArray_IsScalar(self, Unicode)) {
- tmp = PyArray_malloc(buflen*2);
- if (tmp == NULL) {
- Py_DECREF(ret);
- return PyErr_NoMemory();
- }
- alloc = 1;
- newlen = PyUCS2Buffer_AsUCS4((Py_UNICODE *)buffer,
- (npy_ucs4 *)tmp,
- buflen / 2, buflen / 2);
- buflen = newlen*4;
- buffer = tmp;
+ else if (obj && PyDataType_FLAGCHK((PyArray_Descr *)obj, NPY_LIST_PICKLE)) {
+ /* a structured dtype with an object in a field */
+ PyArrayObject *arr = (PyArrayObject *)PyArray_FromScalar(self, NULL);
+ if (arr == NULL) {
+ return NULL;
}
-#endif
+ /* arr.item() */
+ PyObject *val = PyArray_GETITEM(arr, PyArray_DATA(arr));
+ Py_DECREF(arr);
+ if (val == NULL) {
+ return NULL;
+ }
+ PyObject *tup = Py_BuildValue("NN", obj, val);
+ if (tup == NULL) {
+ return NULL;
+ }
+ PyTuple_SET_ITEM(ret, 1, tup);
+ }
+ else {
mod = PyBytes_FromStringAndSize(buffer, buflen);
if (mod == NULL) {
Py_DECREF(ret);
-#ifndef Py_UNICODE_WIDE
- ret = NULL;
- goto fail;
-#else
return NULL;
-#endif
}
PyTuple_SET_ITEM(ret, 1,
Py_BuildValue("NN", obj, mod));
-#ifndef Py_UNICODE_WIDE
-fail:
- if (alloc) PyArray_free((char *)buffer);
-#endif
}
return ret;
}
@@ -2267,12 +2108,6 @@ static PyMethodDef gentype_methods[] = {
{"round",
(PyCFunction)gentype_round,
METH_VARARGS | METH_KEYWORDS, NULL},
-#if defined(NPY_PY3K)
- /* Hook for the round() builtin */
- {"__round__",
- (PyCFunction)gentype_round,
- METH_VARARGS | METH_KEYWORDS, NULL},
-#endif
/* For the format function */
{"__format__",
gentype_format,
@@ -2343,6 +2178,18 @@ static PyMethodDef @name@type_methods[] = {
/**end repeat**/
/**begin repeat
+ * #name = integer, floating, complexfloating#
+ */
+static PyMethodDef @name@type_methods[] = {
+ /* Hook for the round() builtin */
+ {"__round__",
+ (PyCFunction)@name@type_dunder_round,
+ METH_VARARGS | METH_KEYWORDS, NULL},
+ {NULL, NULL, 0, NULL} /* sentinel */
+};
+/**end repeat**/
+
+/**begin repeat
* #name = half,float,double,longdouble#
*/
static PyMethodDef @name@type_methods[] = {
@@ -2533,164 +2380,32 @@ fail:
}
static PyMappingMethods voidtype_as_mapping = {
- (lenfunc)voidtype_length, /*mp_length*/
- (binaryfunc)voidtype_subscript, /*mp_subscript*/
- (objobjargproc)voidtype_ass_subscript, /*mp_ass_subscript*/
+ .mp_length = (lenfunc)voidtype_length,
+ .mp_subscript = (binaryfunc)voidtype_subscript,
+ .mp_ass_subscript = (objobjargproc)voidtype_ass_subscript,
};
static PySequenceMethods voidtype_as_sequence = {
- (lenfunc)voidtype_length, /*sq_length*/
- 0, /*sq_concat*/
- 0, /*sq_repeat*/
- (ssizeargfunc)voidtype_item, /*sq_item*/
- 0, /*sq_slice*/
- (ssizeobjargproc)voidtype_ass_item, /*sq_ass_item*/
- 0, /* ssq_ass_slice */
- 0, /* sq_contains */
- 0, /* sq_inplace_concat */
- 0, /* sq_inplace_repeat */
+ .sq_length = (lenfunc)voidtype_length,
+ .sq_item = (ssizeargfunc)voidtype_item,
+ .sq_ass_item = (ssizeobjargproc)voidtype_ass_item,
};
-static Py_ssize_t
-gentype_getreadbuf(PyObject *self, Py_ssize_t segment, void **ptrptr)
-{
- int numbytes;
- PyArray_Descr *outcode;
-
- if (segment != 0) {
- PyErr_SetString(PyExc_SystemError,
- "Accessing non-existent array segment");
- return -1;
- }
-
- outcode = PyArray_DescrFromScalar(self);
- numbytes = outcode->elsize;
- *ptrptr = (void *)scalar_value(self, outcode);
-
-#ifndef Py_UNICODE_WIDE
- if (outcode->type_num == NPY_UNICODE) {
- numbytes >>= 1;
- }
-#endif
- Py_DECREF(outcode);
- return numbytes;
-}
-
-#if !defined(NPY_PY3K)
-static Py_ssize_t
-gentype_getsegcount(PyObject *self, Py_ssize_t *lenp)
-{
- PyArray_Descr *outcode;
-
- outcode = PyArray_DescrFromScalar(self);
- if (lenp) {
- *lenp = outcode->elsize;
-#ifndef Py_UNICODE_WIDE
- if (outcode->type_num == NPY_UNICODE) {
- *lenp >>= 1;
- }
-#endif
- }
- Py_DECREF(outcode);
- return 1;
-}
-
-static Py_ssize_t
-gentype_getcharbuf(PyObject *self, Py_ssize_t segment, constchar **ptrptr)
-{
- if (PyArray_IsScalar(self, String) ||
- PyArray_IsScalar(self, Unicode)) {
- return gentype_getreadbuf(self, segment, (void **)ptrptr);
- }
- else {
- PyErr_SetString(PyExc_TypeError,
- "Non-character array cannot be interpreted "\
- "as character buffer.");
- return -1;
- }
-}
-#endif /* !defined(NPY_PY3K) */
-
static PyBufferProcs gentype_as_buffer = {
-#if !defined(NPY_PY3K)
- gentype_getreadbuf, /* bf_getreadbuffer*/
- NULL, /* bf_getwritebuffer*/
- gentype_getsegcount, /* bf_getsegcount*/
- gentype_getcharbuf, /* bf_getcharbuffer*/
-#endif
- gentype_getbuffer, /* bf_getbuffer */
- NULL, /* bf_releasebuffer */
+ .bf_getbuffer = gentype_getbuffer,
+ /* release buffer not defined (see buffer.c) */
};
-#if defined(NPY_PY3K)
#define BASEFLAGS Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE
#define LEAFFLAGS Py_TPFLAGS_DEFAULT
-#else
-#define BASEFLAGS Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_CHECKTYPES
-#define LEAFFLAGS Py_TPFLAGS_DEFAULT | Py_TPFLAGS_CHECKTYPES
-#endif
NPY_NO_EXPORT PyTypeObject PyGenericArrType_Type = {
-#if defined(NPY_PY3K)
PyVarObject_HEAD_INIT(NULL, 0)
-#else
- PyObject_HEAD_INIT(NULL)
- 0, /* ob_size */
-#endif
- "numpy.generic", /* tp_name*/
- sizeof(PyObject), /* tp_basicsize*/
- 0, /* tp_itemsize */
- /* methods */
- 0, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
-#if defined(NPY_PY3K)
- 0, /* tp_reserved */
-#else
- 0, /* tp_compare */
-#endif
- 0, /* tp_repr */
- 0, /* tp_as_number */
- 0, /* tp_as_sequence */
- 0, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- 0, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
- 0, /* tp_flags */
- 0, /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
- 0, /* tp_methods */
- 0, /* tp_members */
- 0, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- 0, /* tp_init */
- 0, /* tp_alloc */
- 0, /* tp_new */
- 0, /* tp_free */
- 0, /* tp_is_gc */
- 0, /* tp_bases */
- 0, /* tp_mro */
- 0, /* tp_cache */
- 0, /* tp_subclasses */
- 0, /* tp_weaklist */
- 0, /* tp_del */
- 0, /* tp_version_tag */
+ .tp_name = "numpy.generic",
+ .tp_basicsize = sizeof(PyObject),
};
static void
@@ -2709,183 +2424,163 @@ void_dealloc(PyVoidScalarObject *v)
static void
object_arrtype_dealloc(PyObject *v)
{
- Py_XDECREF(((PyObjectScalarObject *)v)->obval);
+ Py_XDECREF(PyArrayScalar_VAL(v, Object));
Py_TYPE(v)->tp_free(v);
}
-/*
- * string and unicode inherit from Python Type first and so GET_ITEM
- * is different to get to the Python Type.
- *
- * ok is a work-around for a bug in complex_new that doesn't allocate
- * memory from the sub-types memory allocator.
- */
-
-#define _WORK(num) \
- if (type->tp_bases && (PyTuple_GET_SIZE(type->tp_bases)==2)) { \
- PyTypeObject *sup; \
- /* We are inheriting from a Python type as well so \
- give it first dibs on conversion */ \
- sup = (PyTypeObject *)PyTuple_GET_ITEM(type->tp_bases, num); \
- /* Prevent recursion */ \
- if (thisfunc != sup->tp_new) { \
- robj = sup->tp_new(type, args, kwds); \
- if (robj != NULL) goto finish; \
- if (PyTuple_GET_SIZE(args)!=1) return NULL; \
- PyErr_Clear(); \
- } \
- /* now do default conversion */ \
- }
-
-#define _WORK1 _WORK(1)
-#define _WORKz _WORK(0)
-#define _WORK0
+static void
+unicode_arrtype_dealloc(PyObject *v)
+{
+ /* note: may be null if it was never requested */
+ PyMem_Free(PyArrayScalar_VAL(v, Unicode));
+ /* delegate to the base class */
+ PyUnicode_Type.tp_dealloc(v);
+}
/**begin repeat
* #name = byte, short, int, long, longlong, ubyte, ushort, uint, ulong,
* ulonglong, half, float, double, longdouble, cfloat, cdouble,
- * clongdouble, string, unicode, object#
+ * clongdouble, string, unicode#
* #Name = Byte, Short, Int, Long, LongLong, UByte, UShort, UInt, ULong,
* ULongLong, Half, Float, Double, LongDouble, CFloat, CDouble,
- * CLongDouble, String, Unicode, Object#
+ * CLongDouble, String, Unicode#
* #TYPE = BYTE, SHORT, INT, LONG, LONGLONG, UBYTE, USHORT, UINT, ULONG,
* ULONGLONG, HALF, FLOAT, DOUBLE, LONGDOUBLE, CFLOAT, CDOUBLE,
- * CLONGDOUBLE, STRING, UNICODE, OBJECT#
- * #work = 0,0,1,1,1,0,0,0,0,0,0,0,1,0,0,0,0,z,z,0#
- * #default = 0*17,1*2,2#
+ * CLONGDOUBLE, STRING, UNICODE#
*/
-#define _NPY_UNUSED2_1
-#define _NPY_UNUSED2_z
-#define _NPY_UNUSED2_0 NPY_UNUSED
-#define _NPY_UNUSED1_0
-#define _NPY_UNUSED1_1
-#define _NPY_UNUSED1_2 NPY_UNUSED
+/* used as a pattern for testing token equality */
+#define _@TYPE@_IS_@TYPE@
static PyObject *
-@name@_arrtype_new(PyTypeObject *_NPY_UNUSED1_@default@(type), PyObject *args, PyObject *_NPY_UNUSED2_@work@(kwds))
+@name@_arrtype_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
{
- PyObject *obj = NULL;
- PyObject *robj;
- PyArrayObject *arr;
- PyArray_Descr *typecode = NULL;
-#if (@work@ != 0) || (@default@ == 1)
- void *thisfunc = (void *)@name@_arrtype_new;
+ /* allow base-class (if any) to do conversion */
+#if defined(_@TYPE@_IS_UNICODE)
+ PyObject *from_superclass = PyUnicode_Type.tp_new(type, args, kwds);
+#elif defined(_@TYPE@_IS_STRING)
+ PyObject *from_superclass = PyBytes_Type.tp_new(type, args, kwds);
+#elif defined(_@TYPE@_IS_DOUBLE)
+ PyObject *from_superclass = PyFloat_Type.tp_new(type, args, kwds);
+#endif
+#if defined(_@TYPE@_IS_UNICODE) || defined(_@TYPE@_IS_STRING) || defined(_@TYPE@_IS_DOUBLE)
+ if (from_superclass == NULL) {
+ /* don't clear the exception unless numpy can handle the arguments */
+ if (PyTuple_GET_SIZE(args) != 1 || (kwds && PyDict_Size(kwds) != 0)) {
+ return NULL;
+ }
+ PyErr_Clear();
+ }
+ else {
+#if defined(_@TYPE@_IS_UNICODE)
+ PyArrayScalar_VAL(from_superclass, Unicode) = NULL;
#endif
-#if !(@default@ == 2)
- int itemsize;
- void *dest, *src;
+ return from_superclass;
+ }
#endif
- /*
- * allow base-class (if any) to do conversion
- * If successful, this will jump to finish:
- */
- _WORK@work@
-
/* TODO: include type name in error message, which is not @name@ */
- if (!PyArg_ParseTuple(args, "|O", &obj)) {
+ PyObject *obj = NULL;
+ char *kwnames[] = {"", NULL}; /* positional-only */
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O", kwnames, &obj)) {
return NULL;
}
- typecode = PyArray_DescrFromType(NPY_@TYPE@);
+ PyArray_Descr *typecode = PyArray_DescrFromType(NPY_@TYPE@);
if (typecode == NULL) {
return NULL;
}
- /*
- * typecode is new reference and stolen by
- * PyArray_FromAny but not PyArray_Scalar
- */
if (obj == NULL) {
-#if @default@ == 0
- robj = PyArray_Scalar(NULL, typecode, NULL);
+ PyObject *robj = PyArray_Scalar(NULL, typecode, NULL);
+ Py_DECREF(typecode);
if (robj == NULL) {
- Py_DECREF(typecode);
return NULL;
}
- memset(&((Py@Name@ScalarObject *)robj)->obval, 0, sizeof(npy_@name@));
-#elif @default@ == 1
- robj = PyArray_Scalar(NULL, typecode, NULL);
-#elif @default@ == 2
- Py_INCREF(Py_None);
- robj = Py_None;
+#if !defined(_@TYPE@_IS_STRING) && !defined(_@TYPE@_IS_UNICODE)
+ memset(&PyArrayScalar_VAL(robj, @Name@), 0, sizeof(npy_@name@));
#endif
- Py_DECREF(typecode);
- goto finish;
+ return robj;
}
- /*
- * It is expected at this point that robj is a PyArrayScalar
- * (even for Object Data Type)
- */
- arr = (PyArrayObject *)PyArray_FromAny(obj, typecode,
- 0, 0, NPY_ARRAY_FORCECAST, NULL);
- if ((arr == NULL) || (PyArray_NDIM(arr) > 0)) {
+ /* PyArray_FromAny steals a reference, reclaim it before it's gone */
+ Py_INCREF(typecode);
+ PyArrayObject *arr = (PyArrayObject *)PyArray_FromAny(
+ obj, typecode, 0, 0, NPY_ARRAY_FORCECAST, NULL);
+ if (arr == NULL) {
+ Py_DECREF(typecode);
+ return NULL;
+ }
+ if (PyArray_NDIM(arr) > 0) {
+ Py_DECREF(typecode);
return (PyObject *)arr;
}
- /* 0-d array */
- robj = PyArray_ToScalar(PyArray_DATA(arr), arr);
+
+ /* Convert the 0-d array to a scalar */
+ PyObject *robj = PyArray_ToScalar(PyArray_DATA(arr), arr);
Py_DECREF(arr);
-finish:
- /*
- * In OBJECT case, robj is no longer a
- * PyArrayScalar at this point but the
- * remaining code assumes it is
- */
-#if @default@ == 2
- return robj;
-#else
- /* Normal return */
- if ((robj == NULL) || (Py_TYPE(robj) == type)) {
+ if (robj == NULL || Py_TYPE(robj) == type) {
+ Py_DECREF(typecode);
return robj;
}
/*
- * This return path occurs when the requested type is not created
- * but another scalar object is created instead (i.e. when
- * the base-class does the conversion in _WORK macro)
+ * `typecode` does not contain any subclass information, as it was thrown
+ * out by the call to `PyArray_DescrFromType` - we need to add this back.
+ *
+ * FIXME[gh-15467]: This branch is also hit for the "shadowed" builtin
+ * types like `longdouble` (which on platforms where they are the same size
+ * is shadowed by `double`), because `PyArray_FromAny` returns the
+ * shadowing type rather than the requested one.
*/
/* Need to allocate new type and copy data-area over */
+ int itemsize;
if (type->tp_itemsize) {
itemsize = PyBytes_GET_SIZE(robj);
}
else {
itemsize = 0;
}
- obj = type->tp_alloc(type, itemsize);
- if (obj == NULL) {
+ PyObject *new_obj = type->tp_alloc(type, itemsize);
+ if (new_obj == NULL) {
Py_DECREF(robj);
+ Py_DECREF(typecode);
return NULL;
}
- /* typecode will be NULL */
- typecode = PyArray_DescrFromType(NPY_@TYPE@);
- dest = scalar_value(obj, typecode);
- src = scalar_value(robj, typecode);
+ void *dest = scalar_value(new_obj, typecode);
+ void *src = scalar_value(robj, typecode);
Py_DECREF(typecode);
-#if @default@ == 0
- *((npy_@name@ *)dest) = *((npy_@name@ *)src);
-#elif @default@ == 1 /* unicode and strings */
+#if defined(_@TYPE@_IS_STRING) || defined(_@TYPE@_IS_UNICODE)
if (itemsize == 0) { /* unicode */
-#if PY_VERSION_HEX >= 0x03030000
itemsize = PyUnicode_GetLength(robj) * PyUnicode_KIND(robj);
-#else
- itemsize = ((PyUnicodeObject *)robj)->length * sizeof(Py_UNICODE);
-#endif
}
memcpy(dest, src, itemsize);
- /* @default@ == 2 won't get here */
+#else
+ *((npy_@name@ *)dest) = *((npy_@name@ *)src);
#endif
Py_DECREF(robj);
- return obj;
-#endif
+ return new_obj;
}
+#undef _@TYPE@_IS_@TYPE@
+
/**end repeat**/
-#undef _WORK1
-#undef _WORKz
-#undef _WORK0
-#undef _WORK
+static PyObject *
+object_arrtype_new(PyTypeObject *NPY_UNUSED(type), PyObject *args, PyObject *kwds)
+{
+ PyObject *obj = Py_None;
+ char *kwnames[] = {"", NULL}; /* positional-only */
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O:object_", kwnames, &obj)) {
+ return NULL;
+ }
+ PyArray_Descr *typecode = PyArray_DescrFromType(NPY_OBJECT);
+ if (typecode == NULL) {
+ return NULL;
+ }
+ PyArrayObject *arr = (PyArrayObject *)PyArray_FromAny(obj, typecode,
+ 0, 0, NPY_ARRAY_FORCECAST, NULL);
+ return PyArray_Return(arr);
+}
/**begin repeat
* #name = datetime, timedelta#
@@ -2900,8 +2595,8 @@ static PyObject *
PyObject *obj = NULL, *meta_obj = NULL;
Py@Name@ScalarObject *ret;
- /* TODO: include type name in error message, which is not @name@ */
- if (!PyArg_ParseTuple(args, "|OO", &obj, &meta_obj)) {
+ char *kwnames[] = {"", "", NULL}; /* positional-only */
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "|OO", kwnames, &obj, &meta_obj)) {
return NULL;
}
@@ -2954,12 +2649,13 @@ static PyObject *
/* bool->tp_new only returns Py_True or Py_False */
static PyObject *
-bool_arrtype_new(PyTypeObject *NPY_UNUSED(type), PyObject *args, PyObject *NPY_UNUSED(kwds))
+bool_arrtype_new(PyTypeObject *NPY_UNUSED(type), PyObject *args, PyObject *kwds)
{
PyObject *obj = NULL;
PyArrayObject *arr;
- if (!PyArg_ParseTuple(args, "|O:bool_", &obj)) {
+ char *kwnames[] = {"", NULL}; /* positional-only */
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O:bool_", kwnames, &obj)) {
return NULL;
}
if (obj == NULL) {
@@ -3057,74 +2753,20 @@ bool_index(PyObject *a)
/* Arithmetic methods -- only so we can override &, |, ^. */
NPY_NO_EXPORT PyNumberMethods bool_arrtype_as_number = {
- 0, /* nb_add */
- 0, /* nb_subtract */
- 0, /* nb_multiply */
-#if defined(NPY_PY3K)
-#else
- 0, /* nb_divide */
-#endif
- 0, /* nb_remainder */
- 0, /* nb_divmod */
- 0, /* nb_power */
- 0, /* nb_negative */
- 0, /* nb_positive */
- 0, /* nb_absolute */
- (inquiry)bool_arrtype_nonzero, /* nb_nonzero / nb_bool */
- 0, /* nb_invert */
- 0, /* nb_lshift */
- 0, /* nb_rshift */
- (binaryfunc)bool_arrtype_and, /* nb_and */
- (binaryfunc)bool_arrtype_xor, /* nb_xor */
- (binaryfunc)bool_arrtype_or, /* nb_or */
-#if defined(NPY_PY3K)
-#else
- 0, /* nb_coerce */
-#endif
- 0, /* nb_int */
-#if defined(NPY_PY3K)
- 0, /* nb_reserved */
-#else
- 0, /* nb_long */
-#endif
- 0, /* nb_float */
-#if defined(NPY_PY3K)
-#else
- 0, /* nb_oct */
- 0, /* nb_hex */
-#endif
- /* Added in release 2.0 */
- 0, /* nb_inplace_add */
- 0, /* nb_inplace_subtract */
- 0, /* nb_inplace_multiply */
-#if defined(NPY_PY3K)
-#else
- 0, /* nb_inplace_divide */
-#endif
- 0, /* nb_inplace_remainder */
- 0, /* nb_inplace_power */
- 0, /* nb_inplace_lshift */
- 0, /* nb_inplace_rshift */
- 0, /* nb_inplace_and */
- 0, /* nb_inplace_xor */
- 0, /* nb_inplace_or */
- /* Added in release 2.2 */
- /* The following require the Py_TPFLAGS_HAVE_CLASS flag */
- 0, /* nb_floor_divide */
- 0, /* nb_true_divide */
- 0, /* nb_inplace_floor_divide */
- 0, /* nb_inplace_true_divide */
- /* Added in release 2.5 */
- 0, /* nb_index */
+ .nb_bool = (inquiry)bool_arrtype_nonzero,
+ .nb_and = (binaryfunc)bool_arrtype_and,
+ .nb_xor = (binaryfunc)bool_arrtype_xor,
+ .nb_or = (binaryfunc)bool_arrtype_or,
};
static PyObject *
-void_arrtype_new(PyTypeObject *type, PyObject *args, PyObject *NPY_UNUSED(kwds))
+void_arrtype_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
{
PyObject *obj, *arr;
PyObject *new = NULL;
- if (!PyArg_ParseTuple(args, "O:void", &obj)) {
+ char *kwnames[] = {"", NULL}; /* positional-only */
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O:void", kwnames, &obj)) {
return NULL;
}
/*
@@ -3136,11 +2778,7 @@ void_arrtype_new(PyTypeObject *type, PyObject *args, PyObject *NPY_UNUSED(kwds))
(PyArray_Check(obj) &&
PyArray_NDIM((PyArrayObject *)obj)==0 &&
PyArray_ISINTEGER((PyArrayObject *)obj))) {
-#if defined(NPY_PY3K)
new = Py_TYPE(obj)->tp_as_number->nb_int(obj);
-#else
- new = Py_TYPE(obj)->tp_as_number->nb_long(obj);
-#endif
}
if (new && PyLong_Check(new)) {
PyObject *ret;
@@ -3188,7 +2826,7 @@ void_arrtype_new(PyTypeObject *type, PyObject *args, PyObject *NPY_UNUSED(kwds))
static npy_hash_t
@lname@_arrtype_hash(PyObject *obj)
{
- return (npy_hash_t)(((Py@name@ScalarObject *)obj)->obval);
+ return (npy_hash_t)(PyArrayScalar_VAL(obj, @name@));
}
/**end repeat**/
@@ -3199,7 +2837,7 @@ static npy_hash_t
static npy_hash_t
@lname@_arrtype_hash(PyObject *obj)
{
- npy_hash_t x = (npy_hash_t)(((Py@name@ScalarObject *)obj)->obval);
+ npy_hash_t x = (npy_hash_t)(PyArrayScalar_VAL(obj, @name@));
if (x == -1) {
x = -2;
}
@@ -3210,34 +2848,30 @@ static npy_hash_t
static npy_hash_t
ulong_arrtype_hash(PyObject *obj)
{
- PyObject * l = PyLong_FromUnsignedLong(((PyULongScalarObject*)obj)->obval);
+ PyObject * l = PyLong_FromUnsignedLong(PyArrayScalar_VAL(obj, ULong));
npy_hash_t x = PyObject_Hash(l);
Py_DECREF(l);
return x;
}
-#if (NPY_SIZEOF_INT != NPY_SIZEOF_LONG) || defined(NPY_PY3K)
static npy_hash_t
int_arrtype_hash(PyObject *obj)
{
- npy_hash_t x = (npy_hash_t)(((PyIntScalarObject *)obj)->obval);
+ npy_hash_t x = (npy_hash_t)(PyArrayScalar_VAL(obj, Int));
if (x == -1) {
x = -2;
}
return x;
}
-#endif
-#if defined(NPY_PY3K)
static npy_hash_t
long_arrtype_hash(PyObject *obj)
{
- PyObject * l = PyLong_FromLong(((PyLongScalarObject*)obj)->obval);
+ PyObject * l = PyLong_FromLong(PyArrayScalar_VAL(obj, Long));
npy_hash_t x = PyObject_Hash(l);
Py_DECREF(l);
return x;
}
-#endif
/**begin repeat
* #char = ,u#
@@ -3248,7 +2882,7 @@ static NPY_INLINE npy_hash_t
@char@longlong_arrtype_hash(PyObject *obj)
{
PyObject * l = PyLong_From@Word@LongLong(
- ((Py@Char@LongLongScalarObject*)obj)->obval);
+ PyArrayScalar_VAL(obj, @Char@LongLong));
npy_hash_t x = PyObject_Hash(l);
Py_DECREF(l);
return x;
@@ -3264,7 +2898,7 @@ static NPY_INLINE npy_hash_t
static npy_hash_t
@lname@_arrtype_hash(PyObject *obj)
{
- npy_hash_t x = (npy_hash_t)(((Py@name@ScalarObject *)obj)->obval);
+ npy_hash_t x = (npy_hash_t)(PyArrayScalar_VAL(obj, @name@));
if (x == -1) {
x = -2;
}
@@ -3275,7 +2909,7 @@ static npy_hash_t
@lname@_arrtype_hash(PyObject *obj)
{
npy_hash_t y;
- npy_longlong x = (((Py@name@ScalarObject *)obj)->obval);
+ npy_longlong x = (PyArrayScalar_VAL(obj, @name@));
if ((x <= LONG_MAX)) {
y = (npy_hash_t) x;
@@ -3308,7 +2942,7 @@ static npy_hash_t
static npy_hash_t
@lname@_arrtype_hash(PyObject *obj)
{
- return _Py_HashDouble((double) ((Py@name@ScalarObject *)obj)->obval);
+ return _Py_HashDouble((double) PyArrayScalar_VAL(obj, @name@));
}
/* borrowed from complex_hash */
@@ -3317,13 +2951,13 @@ c@lname@_arrtype_hash(PyObject *obj)
{
npy_hash_t hashreal, hashimag, combined;
hashreal = _Py_HashDouble((double)
- (((PyC@name@ScalarObject *)obj)->obval).real);
+ PyArrayScalar_VAL(obj, C@name@).real);
if (hashreal == -1) {
return -1;
}
hashimag = _Py_HashDouble((double)
- (((PyC@name@ScalarObject *)obj)->obval).imag);
+ PyArrayScalar_VAL(obj, C@name@).imag);
if (hashimag == -1) {
return -1;
}
@@ -3338,13 +2972,13 @@ c@lname@_arrtype_hash(PyObject *obj)
static npy_hash_t
half_arrtype_hash(PyObject *obj)
{
- return _Py_HashDouble(npy_half_to_double(((PyHalfScalarObject *)obj)->obval));
+ return _Py_HashDouble(npy_half_to_double(PyArrayScalar_VAL(obj, Half)));
}
static npy_hash_t
object_arrtype_hash(PyObject *obj)
{
- return PyObject_Hash(((PyObjectScalarObject *)obj)->obval);
+ return PyObject_Hash(PyArrayScalar_VAL(obj, Object));
}
/* we used to just hash the pointer */
@@ -3459,90 +3093,20 @@ object_arrtype_inplace_repeat(PyObjectScalarObject *self, Py_ssize_t count)
}
static PySequenceMethods object_arrtype_as_sequence = {
- (lenfunc)object_arrtype_length, /*sq_length*/
- (binaryfunc)object_arrtype_concat, /*sq_concat*/
- (ssizeargfunc)object_arrtype_repeat, /*sq_repeat*/
- 0, /*sq_item*/
- 0, /*sq_slice*/
- 0, /* sq_ass_item */
- 0, /* sq_ass_slice */
- (objobjproc)object_arrtype_contains, /* sq_contains */
- (binaryfunc)object_arrtype_inplace_concat, /* sq_inplace_concat */
- (ssizeargfunc)object_arrtype_inplace_repeat, /* sq_inplace_repeat */
+ .sq_length = (lenfunc)object_arrtype_length,
+ .sq_concat = (binaryfunc)object_arrtype_concat,
+ .sq_repeat = (ssizeargfunc)object_arrtype_repeat,
+ .sq_contains = (objobjproc)object_arrtype_contains,
+ .sq_inplace_concat = (binaryfunc)object_arrtype_inplace_concat,
+ .sq_inplace_repeat = (ssizeargfunc)object_arrtype_inplace_repeat,
};
static PyMappingMethods object_arrtype_as_mapping = {
- (lenfunc)object_arrtype_length,
- (binaryfunc)object_arrtype_subscript,
- (objobjargproc)object_arrtype_ass_subscript,
+ .mp_length = (lenfunc)object_arrtype_length,
+ .mp_subscript = (binaryfunc)object_arrtype_subscript,
+ .mp_ass_subscript = (objobjargproc)object_arrtype_ass_subscript,
};
-#if !defined(NPY_PY3K)
-static Py_ssize_t
-object_arrtype_getsegcount(PyObjectScalarObject *self, Py_ssize_t *lenp)
-{
- Py_ssize_t newlen;
- int cnt;
- PyBufferProcs *pb = Py_TYPE(self->obval)->tp_as_buffer;
-
- if (pb == NULL ||
- pb->bf_getsegcount == NULL ||
- (cnt = (*pb->bf_getsegcount)(self->obval, &newlen)) != 1) {
- return 0;
- }
- if (lenp) {
- *lenp = newlen;
- }
- return cnt;
-}
-
-static Py_ssize_t
-object_arrtype_getreadbuf(PyObjectScalarObject *self, Py_ssize_t segment, void **ptrptr)
-{
- PyBufferProcs *pb = Py_TYPE(self->obval)->tp_as_buffer;
-
- if (pb == NULL ||
- pb->bf_getreadbuffer == NULL ||
- pb->bf_getsegcount == NULL) {
- PyErr_SetString(PyExc_TypeError,
- "expected a readable buffer object");
- return -1;
- }
- return (*pb->bf_getreadbuffer)(self->obval, segment, ptrptr);
-}
-
-static Py_ssize_t
-object_arrtype_getwritebuf(PyObjectScalarObject *self, Py_ssize_t segment, void **ptrptr)
-{
- PyBufferProcs *pb = Py_TYPE(self->obval)->tp_as_buffer;
-
- if (pb == NULL ||
- pb->bf_getwritebuffer == NULL ||
- pb->bf_getsegcount == NULL) {
- PyErr_SetString(PyExc_TypeError,
- "expected a writeable buffer object");
- return -1;
- }
- return (*pb->bf_getwritebuffer)(self->obval, segment, ptrptr);
-}
-
-static Py_ssize_t
-object_arrtype_getcharbuf(PyObjectScalarObject *self, Py_ssize_t segment,
- constchar **ptrptr)
-{
- PyBufferProcs *pb = Py_TYPE(self->obval)->tp_as_buffer;
-
- if (pb == NULL ||
- pb->bf_getcharbuffer == NULL ||
- pb->bf_getsegcount == NULL) {
- PyErr_SetString(PyExc_TypeError,
- "expected a character buffer object");
- return -1;
- }
- return (*pb->bf_getcharbuffer)(self->obval, segment, ptrptr);
-}
-#endif
-
static int
object_arrtype_getbuffer(PyObjectScalarObject *self, Py_buffer *view, int flags)
{
@@ -3570,14 +3134,8 @@ object_arrtype_releasebuffer(PyObjectScalarObject *self, Py_buffer *view)
}
static PyBufferProcs object_arrtype_as_buffer = {
-#if !defined(NPY_PY3K)
- (readbufferproc)object_arrtype_getreadbuf,
- (writebufferproc)object_arrtype_getwritebuf,
- (segcountproc)object_arrtype_getsegcount,
- (charbufferproc)object_arrtype_getcharbuf,
-#endif
- (getbufferproc)object_arrtype_getbuffer,
- (releasebufferproc)object_arrtype_releasebuffer,
+ .bf_getbuffer = (getbufferproc)object_arrtype_getbuffer,
+ .bf_releasebuffer = (releasebufferproc)object_arrtype_releasebuffer,
};
static PyObject *
@@ -3587,62 +3145,16 @@ object_arrtype_call(PyObjectScalarObject *obj, PyObject *args, PyObject *kwds)
}
NPY_NO_EXPORT PyTypeObject PyObjectArrType_Type = {
-#if defined(NPY_PY3K)
PyVarObject_HEAD_INIT(NULL, 0)
-#else
- PyObject_HEAD_INIT(NULL)
- 0, /* ob_size */
-#endif
- "numpy.object_", /* tp_name*/
- sizeof(PyObjectScalarObject), /* tp_basicsize*/
- 0, /* tp_itemsize */
- (destructor)object_arrtype_dealloc, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
-#if defined(NPY_PY3K)
- 0, /* tp_reserved */
-#else
- 0, /* tp_compare */
-#endif
- 0, /* tp_repr */
- 0, /* tp_as_number */
- &object_arrtype_as_sequence, /* tp_as_sequence */
- &object_arrtype_as_mapping, /* tp_as_mapping */
- 0, /* tp_hash */
- (ternaryfunc)object_arrtype_call, /* tp_call */
- 0, /* tp_str */
- (getattrofunc)object_arrtype_getattro, /* tp_getattro */
- (setattrofunc)object_arrtype_setattro, /* tp_setattro */
- &object_arrtype_as_buffer, /* tp_as_buffer */
- 0, /* tp_flags */
- 0, /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
- 0, /* tp_methods */
- 0, /* tp_members */
- 0, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- 0, /* tp_init */
- 0, /* tp_alloc */
- 0, /* tp_new */
- 0, /* tp_free */
- 0, /* tp_is_gc */
- 0, /* tp_bases */
- 0, /* tp_mro */
- 0, /* tp_cache */
- 0, /* tp_subclasses */
- 0, /* tp_weaklist */
- 0, /* tp_del */
- 0, /* tp_version_tag */
+ .tp_name = "numpy.object_",
+ .tp_basicsize = sizeof(PyObjectScalarObject),
+ .tp_dealloc = (destructor)object_arrtype_dealloc,
+ .tp_as_sequence = &object_arrtype_as_sequence,
+ .tp_as_mapping = &object_arrtype_as_mapping,
+ .tp_call = (ternaryfunc)object_arrtype_call,
+ .tp_getattro = (getattrofunc)object_arrtype_getattro,
+ .tp_setattro = (setattrofunc)object_arrtype_setattro,
+ .tp_as_buffer = &object_arrtype_as_buffer,
};
static PyObject *
@@ -3671,13 +3183,8 @@ gen_arrtype_subscript(PyObject *self, PyObject *key)
#define NAME_bool "bool"
#define NAME_void "void"
-#if defined(NPY_PY3K)
#define NAME_string "bytes"
#define NAME_unicode "str"
-#else
-#define NAME_string "string"
-#define NAME_unicode "unicode"
-#endif
/**begin repeat
* #name = bool, string, unicode, void#
@@ -3685,62 +3192,9 @@ gen_arrtype_subscript(PyObject *self, PyObject *key)
* #ex = _,_,_,#
*/
NPY_NO_EXPORT PyTypeObject Py@NAME@ArrType_Type = {
-#if defined(NPY_PY3K)
PyVarObject_HEAD_INIT(NULL, 0)
-#else
- PyObject_HEAD_INIT(NULL)
- 0, /* ob_size */
-#endif
- "numpy." NAME_@name@ "@ex@", /* tp_name*/
- sizeof(Py@NAME@ScalarObject), /* tp_basicsize*/
- 0, /* tp_itemsize */
- 0, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
-#if defined(NPY_PY3K)
- 0, /* tp_reserved */
-#else
- 0, /* tp_compare */
-#endif
- 0, /* tp_repr */
- 0, /* tp_as_number */
- 0, /* tp_as_sequence */
- 0, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- 0, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
- 0, /* tp_flags */
- 0, /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
- 0, /* tp_methods */
- 0, /* tp_members */
- 0, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- 0, /* tp_init */
- 0, /* tp_alloc */
- 0, /* tp_new */
- 0, /* tp_free */
- 0, /* tp_is_gc */
- 0, /* tp_bases */
- 0, /* tp_mro */
- 0, /* tp_cache */
- 0, /* tp_subclasses */
- 0, /* tp_weaklist */
- 0, /* tp_del */
- 0, /* tp_version_tag */
+ .tp_name = "numpy." NAME_@name@ "@ex@",
+ .tp_basicsize = sizeof(Py@NAME@ScalarObject),
};
/**end repeat**/
@@ -3774,72 +3228,18 @@ NPY_NO_EXPORT PyTypeObject Py@NAME@ArrType_Type = {
#define _THIS_SIZE "256"
#endif
NPY_NO_EXPORT PyTypeObject Py@NAME@ArrType_Type = {
-#if defined(NPY_PY3K)
PyVarObject_HEAD_INIT(NULL, 0)
-#else
- PyObject_HEAD_INIT(NULL)
- 0, /* ob_size */
-#endif
- "numpy.@name@" _THIS_SIZE, /* tp_name*/
- sizeof(Py@NAME@ScalarObject), /* tp_basicsize*/
- 0, /* tp_itemsize */
- 0, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
-#if defined(NPY_PY3K)
- 0, /* tp_reserved */
-#else
- 0, /* tp_compare */
-#endif
- 0, /* tp_repr */
- 0, /* tp_as_number */
- 0, /* tp_as_sequence */
- 0, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- 0, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
- 0, /* tp_flags */
- 0, /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
- 0, /* tp_methods */
- 0, /* tp_members */
- 0, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- 0, /* tp_init */
- 0, /* tp_alloc */
- 0, /* tp_new */
- 0, /* tp_free */
- 0, /* tp_is_gc */
- 0, /* tp_bases */
- 0, /* tp_mro */
- 0, /* tp_cache */
- 0, /* tp_subclasses */
- 0, /* tp_weaklist */
- 0, /* tp_del */
- 0, /* tp_version_tag */
+ .tp_name = "numpy.@name@" _THIS_SIZE,
+ .tp_basicsize = sizeof(Py@NAME@ScalarObject),
};
+
#undef _THIS_SIZE
/**end repeat**/
static PyMappingMethods gentype_as_mapping = {
- NULL,
- (binaryfunc)gen_arrtype_subscript,
- NULL
+ .mp_subscript = (binaryfunc)gen_arrtype_subscript,
};
@@ -3865,62 +3265,10 @@ static PyMappingMethods gentype_as_mapping = {
#endif
NPY_NO_EXPORT PyTypeObject Py@NAME@ArrType_Type = {
-#if defined(NPY_PY3K)
PyVarObject_HEAD_INIT(0, 0)
-#else
- PyObject_HEAD_INIT(0)
- 0, /* ob_size */
-#endif
- "numpy.@name@" _THIS_SIZE, /* tp_name*/
- sizeof(Py@NAME@ScalarObject), /* tp_basicsize*/
- 0, /* tp_itemsize*/
- 0, /* tp_dealloc*/
- 0, /* tp_print*/
- 0, /* tp_getattr*/
- 0, /* tp_setattr*/
-#if defined(NPY_PY3K)
- 0, /* tp_reserved */
-#else
- 0, /* tp_compare */
-#endif
- 0, /* tp_repr*/
- 0, /* tp_as_number*/
- 0, /* tp_as_sequence*/
- 0, /* tp_as_mapping*/
- 0, /* tp_hash */
- 0, /* tp_call*/
- 0, /* tp_str*/
- 0, /* tp_getattro*/
- 0, /* tp_setattro*/
- 0, /* tp_as_buffer*/
- Py_TPFLAGS_DEFAULT, /* tp_flags*/
- 0, /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
- 0, /* tp_methods */
- 0, /* tp_members */
- 0, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- 0, /* tp_init */
- 0, /* tp_alloc */
- 0, /* tp_new */
- 0, /* tp_free */
- 0, /* tp_is_gc */
- 0, /* tp_bases */
- 0, /* tp_mro */
- 0, /* tp_cache */
- 0, /* tp_subclasses */
- 0, /* tp_weaklist */
- 0, /* tp_del */
- 0, /* tp_version_tag */
+ .tp_name = "numpy.@name@" _THIS_SIZE,
+ .tp_basicsize = sizeof(Py@NAME@ScalarObject),
+ .tp_flags = Py_TPFLAGS_DEFAULT,
};
#undef _THIS_SIZE
@@ -4060,8 +3408,11 @@ initialize_casting_tables(void)
_npy_can_cast_safely_table[_FROM_NUM][NPY_STRING] = 1;
_npy_can_cast_safely_table[_FROM_NUM][NPY_UNICODE] = 1;
- /* Allow casts from any integer to the TIMEDELTA type */
-#if @from_isint@ || @from_isuint@
+#if @from_isint@ && NPY_SIZEOF_TIMEDELTA >= _FROM_BSIZE
+ /* Allow casts from smaller or equal signed integers to the TIMEDELTA type */
+ _npy_can_cast_safely_table[_FROM_NUM][NPY_TIMEDELTA] = 1;
+#elif @from_isuint@ && NPY_SIZEOF_TIMEDELTA > _FROM_BSIZE
+ /* Allow casts from smaller unsigned integers to the TIMEDELTA type */
_npy_can_cast_safely_table[_FROM_NUM][NPY_TIMEDELTA] = 1;
#endif
@@ -4250,38 +3601,6 @@ initialize_casting_tables(void)
}
}
-#ifndef NPY_PY3K
-/*
- * In python2, the `float` and `complex` types still implement the obsolete
- * "tp_print" method, which uses CPython's float-printing routines to print the
- * float. Numpy's float_/cfloat inherit from Python float/complex, but
- * override its tp_repr and tp_str methods. In order to avoid an inconsistency
- * with the inherited tp_print, we need to override it too.
- *
- * In python3 the tp_print method is reserved/unused.
- */
-static int
-doubletype_print(PyObject *o, FILE *fp, int flags)
-{
- int ret;
- PyObject *to_print;
- if (flags & Py_PRINT_RAW) {
- to_print = PyObject_Str(o);
- }
- else {
- to_print = PyObject_Repr(o);
- }
-
- if (to_print == NULL) {
- return -1;
- }
-
- ret = PyObject_Print(to_print, fp, Py_PRINT_RAW);
- Py_DECREF(to_print);
- return ret;
-}
-#endif
-
static PyNumberMethods longdoubletype_as_number;
static PyNumberMethods clongdoubletype_as_number;
static void init_basetypes(void);
@@ -4333,12 +3652,6 @@ initialize_numeric_types(void)
/**end repeat**/
-#ifndef NPY_PY3K
- PyDoubleArrType_Type.tp_print = &doubletype_print;
- PyCDoubleArrType_Type.tp_print = &doubletype_print;
-#endif
-
-
PyBoolArrType_Type.tp_as_number->nb_index = (unaryfunc)bool_index;
PyStringArrType_Type.tp_alloc = NULL;
@@ -4385,6 +3698,9 @@ initialize_numeric_types(void)
/**end repeat**/
+ PyUnicodeArrType_Type.tp_dealloc = unicode_arrtype_dealloc;
+ PyUnicodeArrType_Type.tp_as_buffer = &gentype_as_buffer;
+
/**begin repeat
* #name = bool, byte, short, ubyte, ushort, uint, ulong, ulonglong,
* half, float, longdouble, cfloat, clongdouble, void, object,
@@ -4399,8 +3715,8 @@ initialize_numeric_types(void)
/**end repeat**/
/**begin repeat
- * #name = cfloat, clongdouble#
- * #NAME = CFloat, CLongDouble#
+ * #name = cfloat, clongdouble, floating, integer, complexfloating#
+ * #NAME = CFloat, CLongDouble, Floating, Integer, ComplexFloating#
*/
Py@NAME@ArrType_Type.tp_methods = @name@type_methods;
@@ -4416,20 +3732,14 @@ initialize_numeric_types(void)
/**end repeat**/
-#if (NPY_SIZEOF_INT != NPY_SIZEOF_LONG) || defined(NPY_PY3K)
/* We won't be inheriting from Python Int type. */
PyIntArrType_Type.tp_hash = int_arrtype_hash;
-#endif
-#if defined(NPY_PY3K)
/* We won't be inheriting from Python Int type. */
PyLongArrType_Type.tp_hash = long_arrtype_hash;
-#endif
-#if (NPY_SIZEOF_LONG != NPY_SIZEOF_LONGLONG) || defined(NPY_PY3K)
/* We won't be inheriting from Python Int type. */
PyLongLongArrType_Type.tp_hash = longlong_arrtype_hash;
-#endif
/**begin repeat
* #name = repr, str#
@@ -4472,14 +3782,7 @@ initialize_numeric_types(void)
* does not return a normal Python type
*/
@char@longdoubletype_as_number.nb_float = @char@longdoubletype_float;
-#if defined(NPY_PY3K)
@char@longdoubletype_as_number.nb_int = @char@longdoubletype_long;
-#else
- @char@longdoubletype_as_number.nb_int = @char@longdoubletype_int;
- @char@longdoubletype_as_number.nb_long = @char@longdoubletype_long;
- @char@longdoubletype_as_number.nb_hex = @char@longdoubletype_hex;
- @char@longdoubletype_as_number.nb_oct = @char@longdoubletype_oct;
-#endif
Py@CHAR@LongDoubleArrType_Type.tp_as_number = &@char@longdoubletype_as_number;
Py@CHAR@LongDoubleArrType_Type.tp_repr = @char@longdoubletype_repr;
diff --git a/numpy/core/src/multiarray/scalartypes.h b/numpy/core/src/multiarray/scalartypes.h
index 83b188128..861f2c943 100644
--- a/numpy/core/src/multiarray/scalartypes.h
+++ b/numpy/core/src/multiarray/scalartypes.h
@@ -19,13 +19,8 @@ initialize_casting_tables(void);
NPY_NO_EXPORT void
initialize_numeric_types(void);
-#if PY_VERSION_HEX >= 0x03000000
NPY_NO_EXPORT void
gentype_struct_free(PyObject *ptr);
-#else
-NPY_NO_EXPORT void
-gentype_struct_free(void *ptr, void *arg);
-#endif
NPY_NO_EXPORT int
is_anyscalar_exact(PyObject *obj);
diff --git a/numpy/core/src/multiarray/sequence.c b/numpy/core/src/multiarray/sequence.c
index 4769bdad9..1efdd204f 100644
--- a/numpy/core/src/multiarray/sequence.c
+++ b/numpy/core/src/multiarray/sequence.c
@@ -38,8 +38,13 @@ array_contains(PyArrayObject *self, PyObject *el)
if (res == NULL) {
return -1;
}
+
any = PyArray_Any((PyArrayObject *)res, NPY_MAXDIMS, NULL);
Py_DECREF(res);
+ if (any == NULL) {
+ return -1;
+ }
+
ret = PyObject_IsTrue(any);
Py_DECREF(any);
return ret;
diff --git a/numpy/core/src/multiarray/shape.c b/numpy/core/src/multiarray/shape.c
index 4e31f003b..30507112d 100644
--- a/numpy/core/src/multiarray/shape.c
+++ b/numpy/core/src/multiarray/shape.c
@@ -317,7 +317,7 @@ _putzero(char *optr, PyObject *zero, PyArray_Descr *dtype)
int offset;
Py_ssize_t pos = 0;
while (PyDict_Next(dtype->fields, &pos, &key, &value)) {
- if NPY_TITLE_KEY(key, value) {
+ if (NPY_TITLE_KEY(key, value)) {
continue;
}
if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, &title)) {
@@ -981,7 +981,7 @@ PyArray_Flatten(PyArrayObject *a, NPY_ORDER order)
/* See shape.h for parameters documentation */
NPY_NO_EXPORT PyObject *
-build_shape_string(npy_intp n, npy_intp *vals)
+build_shape_string(npy_intp n, npy_intp const *vals)
{
npy_intp i;
PyObject *ret, *tmp;
diff --git a/numpy/core/src/multiarray/shape.h b/numpy/core/src/multiarray/shape.h
index 0451a463e..d25292556 100644
--- a/numpy/core/src/multiarray/shape.h
+++ b/numpy/core/src/multiarray/shape.h
@@ -6,7 +6,7 @@
* A negative value in 'vals' gets interpreted as newaxis.
*/
NPY_NO_EXPORT PyObject *
-build_shape_string(npy_intp n, npy_intp *vals);
+build_shape_string(npy_intp n, npy_intp const *vals);
/*
* Creates a sorted stride perm matching the KEEPORDER behavior
diff --git a/numpy/core/src/multiarray/strfuncs.c b/numpy/core/src/multiarray/strfuncs.c
index 495d897b2..b570aec08 100644
--- a/numpy/core/src/multiarray/strfuncs.c
+++ b/numpy/core/src/multiarray/strfuncs.c
@@ -64,7 +64,7 @@ extend_str(char **strp, Py_ssize_t n, Py_ssize_t *maxp)
static int
dump_data(char **string, Py_ssize_t *n, Py_ssize_t *max_n, char *data, int nd,
- npy_intp *dimensions, npy_intp *strides, PyArrayObject* self)
+ npy_intp const *dimensions, npy_intp const *strides, PyArrayObject* self)
{
PyObject *op = NULL, *sp = NULL;
char *ostring;
@@ -226,34 +226,3 @@ array_format(PyArrayObject *self, PyObject *args)
}
}
-#ifndef NPY_PY3K
-
-NPY_NO_EXPORT PyObject *
-array_unicode(PyArrayObject *self)
-{
- PyObject *uni;
-
- if (PyArray_NDIM(self) == 0) {
- PyObject *item = PyArray_ToScalar(PyArray_DATA(self), self);
- if (item == NULL){
- return NULL;
- }
-
- /* defer to invoking `unicode` on the scalar */
- uni = PyObject_CallFunctionObjArgs(
- (PyObject *)&PyUnicode_Type, item, NULL);
- Py_DECREF(item);
- }
- else {
- /* Do what unicode(self) would normally do */
- PyObject *str = PyObject_Str((PyObject *)self);
- if (str == NULL){
- return NULL;
- }
- uni = PyUnicode_FromObject(str);
- Py_DECREF(str);
- }
- return uni;
-}
-
-#endif
diff --git a/numpy/core/src/multiarray/strfuncs.h b/numpy/core/src/multiarray/strfuncs.h
index 7e869d926..5dd661a20 100644
--- a/numpy/core/src/multiarray/strfuncs.h
+++ b/numpy/core/src/multiarray/strfuncs.h
@@ -13,9 +13,4 @@ array_str(PyArrayObject *self);
NPY_NO_EXPORT PyObject *
array_format(PyArrayObject *self, PyObject *args);
-#ifndef NPY_PY3K
- NPY_NO_EXPORT PyObject *
- array_unicode(PyArrayObject *self);
-#endif
-
#endif
diff --git a/numpy/core/src/multiarray/typeinfo.c b/numpy/core/src/multiarray/typeinfo.c
index 14c4f27cb..b0563b3c0 100644
--- a/numpy/core/src/multiarray/typeinfo.c
+++ b/numpy/core/src/multiarray/typeinfo.c
@@ -5,8 +5,10 @@
*/
#include "typeinfo.h"
-/* In python 2, this is not exported from Python.h */
+#if (defined(PYPY_VERSION_NUM) && (PYPY_VERSION_NUM <= 0x07030000))
+/* PyPy issue 3160 */
#include <structseq.h>
+#endif
#define NPY_NO_DEPRECATED_API NPY_API_VERSION
#define _MULTIARRAYMODULE
@@ -58,11 +60,7 @@ PyArray_typeinfo(
PyObject *entry = PyStructSequence_New(&PyArray_typeinfoType);
if (entry == NULL)
return NULL;
-#if defined(NPY_PY3K)
PyStructSequence_SET_ITEM(entry, 0, Py_BuildValue("C", typechar));
-#else
- PyStructSequence_SET_ITEM(entry, 0, Py_BuildValue("c", typechar));
-#endif
PyStructSequence_SET_ITEM(entry, 1, Py_BuildValue("i", typenum));
PyStructSequence_SET_ITEM(entry, 2, Py_BuildValue("i", nbits));
PyStructSequence_SET_ITEM(entry, 3, Py_BuildValue("i", align));
@@ -84,11 +82,7 @@ PyArray_typeinforanged(
PyObject *entry = PyStructSequence_New(&PyArray_typeinforangedType);
if (entry == NULL)
return NULL;
-#if defined(NPY_PY3K)
PyStructSequence_SET_ITEM(entry, 0, Py_BuildValue("C", typechar));
-#else
- PyStructSequence_SET_ITEM(entry, 0, Py_BuildValue("c", typechar));
-#endif
PyStructSequence_SET_ITEM(entry, 1, Py_BuildValue("i", typenum));
PyStructSequence_SET_ITEM(entry, 2, Py_BuildValue("i", nbits));
PyStructSequence_SET_ITEM(entry, 3, Py_BuildValue("i", align));
@@ -104,10 +98,8 @@ PyArray_typeinforanged(
return entry;
}
-/* Python version only needed for backport to 2.7 */
-#if (PY_VERSION_HEX < 0x03040000) \
- || (defined(PYPY_VERSION_NUM) && (PYPY_VERSION_NUM < 0x07020000))
-
+/* Python version needed for older PyPy */
+#if (defined(PYPY_VERSION_NUM) && (PYPY_VERSION_NUM < 0x07020000))
static int
PyStructSequence_InitType2(PyTypeObject *type, PyStructSequence_Desc *desc) {
PyStructSequence_InitType(type, desc);
diff --git a/numpy/core/src/multiarray/usertypes.c b/numpy/core/src/multiarray/usertypes.c
index 2e8fb514f..997467b4d 100644
--- a/numpy/core/src/multiarray/usertypes.c
+++ b/numpy/core/src/multiarray/usertypes.c
@@ -128,6 +128,44 @@ PyArray_InitArrFuncs(PyArray_ArrFuncs *f)
f->cancastto = NULL;
}
+
+static int
+test_deprecated_arrfuncs_members(PyArray_ArrFuncs *f) {
+ /* NumPy 1.19, 2020-01-15 */
+ if (f->fastputmask != NULL) {
+ if (DEPRECATE(
+ "The ->f->fastputmask member of custom dtypes is ignored; "
+ "setting it may be an error in the future.\n"
+ "The custom dtype you are using must be revised, but "
+ "results will not be affected.") < 0) {
+ return -1;
+ }
+ }
+ /* NumPy 1.19, 2020-01-15 */
+ if (f->fasttake != NULL) {
+ if (DEPRECATE(
+                "The ->f->fasttake member of custom dtypes is ignored; "
+ "setting it may be an error in the future.\n"
+ "The custom dtype you are using must be revised, but "
+ "results will not be affected.") < 0) {
+ return -1;
+ }
+ }
+ /* NumPy 1.19, 2020-01-15 */
+ if (f->fastclip != NULL) {
+ /* fastclip was already deprecated at execution time in 1.17. */
+ if (DEPRECATE(
+ "The ->f->fastclip member of custom dtypes is deprecated; "
+ "setting it will be an error in the future.\n"
+ "The custom dtype you are using must be changed to use "
+ "PyUFunc_RegisterLoopForDescr to attach a custom loop to "
+ "np.core.umath.clip, np.minimum, and np.maximum") < 0) {
+ return -1;
+ }
+ }
+ return 0;
+}
+
/*
returns typenum to associate with this type >=NPY_USERDEF.
needs the userdecrs table and PyArray_NUMUSER variables
@@ -176,6 +214,11 @@ PyArray_RegisterDataType(PyArray_Descr *descr)
PyErr_SetString(PyExc_ValueError, "missing typeobject");
return -1;
}
+
+ if (test_deprecated_arrfuncs_members(f) < 0) {
+ return -1;
+ }
+
userdescrs = realloc(userdescrs,
(NPY_NUMUSERTYPES+1)*sizeof(void *));
if (userdescrs == NULL) {
diff --git a/numpy/core/src/multiarray/vdot.c b/numpy/core/src/multiarray/vdot.c
index 424a21710..9b5d19522 100644
--- a/numpy/core/src/multiarray/vdot.c
+++ b/numpy/core/src/multiarray/vdot.c
@@ -15,17 +15,17 @@ CFLOAT_vdot(char *ip1, npy_intp is1, char *ip2, npy_intp is2,
char *op, npy_intp n, void *NPY_UNUSED(ignore))
{
#if defined(HAVE_CBLAS)
- int is1b = blas_stride(is1, sizeof(npy_cfloat));
- int is2b = blas_stride(is2, sizeof(npy_cfloat));
+ CBLAS_INT is1b = blas_stride(is1, sizeof(npy_cfloat));
+ CBLAS_INT is2b = blas_stride(is2, sizeof(npy_cfloat));
if (is1b && is2b) {
double sum[2] = {0., 0.}; /* double for stability */
while (n > 0) {
- int chunk = n < NPY_CBLAS_CHUNK ? n : NPY_CBLAS_CHUNK;
+ CBLAS_INT chunk = n < NPY_CBLAS_CHUNK ? n : NPY_CBLAS_CHUNK;
float tmp[2];
- cblas_cdotc_sub((int)n, ip1, is1b, ip2, is2b, tmp);
+ CBLAS_FUNC(cblas_cdotc_sub)((CBLAS_INT)n, ip1, is1b, ip2, is2b, tmp);
sum[0] += (double)tmp[0];
sum[1] += (double)tmp[1];
/* use char strides here */
@@ -66,17 +66,17 @@ CDOUBLE_vdot(char *ip1, npy_intp is1, char *ip2, npy_intp is2,
char *op, npy_intp n, void *NPY_UNUSED(ignore))
{
#if defined(HAVE_CBLAS)
- int is1b = blas_stride(is1, sizeof(npy_cdouble));
- int is2b = blas_stride(is2, sizeof(npy_cdouble));
+ CBLAS_INT is1b = blas_stride(is1, sizeof(npy_cdouble));
+ CBLAS_INT is2b = blas_stride(is2, sizeof(npy_cdouble));
if (is1b && is2b) {
double sum[2] = {0., 0.}; /* double for stability */
while (n > 0) {
- int chunk = n < NPY_CBLAS_CHUNK ? n : NPY_CBLAS_CHUNK;
+ CBLAS_INT chunk = n < NPY_CBLAS_CHUNK ? n : NPY_CBLAS_CHUNK;
double tmp[2];
- cblas_zdotc_sub((int)n, ip1, is1b, ip2, is2b, tmp);
+ CBLAS_FUNC(cblas_zdotc_sub)((CBLAS_INT)n, ip1, is1b, ip2, is2b, tmp);
sum[0] += (double)tmp[0];
sum[1] += (double)tmp[1];
/* use char strides here */
diff --git a/numpy/core/src/npysort/npysort_common.h b/numpy/core/src/npysort/npysort_common.h
index 5fd03b96f..2a6e4d421 100644
--- a/numpy/core/src/npysort/npysort_common.h
+++ b/numpy/core/src/npysort/npysort_common.h
@@ -329,6 +329,14 @@ UNICODE_LT(const npy_ucs4 *s1, const npy_ucs4 *s2, size_t len)
NPY_INLINE static int
DATETIME_LT(npy_datetime a, npy_datetime b)
{
+ if (a == NPY_DATETIME_NAT) {
+ return 0;
+ }
+
+ if (b == NPY_DATETIME_NAT) {
+ return 1;
+ }
+
return a < b;
}
@@ -336,6 +344,14 @@ DATETIME_LT(npy_datetime a, npy_datetime b)
NPY_INLINE static int
TIMEDELTA_LT(npy_timedelta a, npy_timedelta b)
{
+ if (a == NPY_DATETIME_NAT) {
+ return 0;
+ }
+
+ if (b == NPY_DATETIME_NAT) {
+ return 1;
+ }
+
return a < b;
}
diff --git a/numpy/core/src/umath/_operand_flag_tests.c.src b/numpy/core/src/umath/_operand_flag_tests.c.src
index 551a9c632..d22a5c507 100644
--- a/numpy/core/src/umath/_operand_flag_tests.c.src
+++ b/numpy/core/src/umath/_operand_flag_tests.c.src
@@ -14,7 +14,7 @@ static PyMethodDef TestMethods[] = {
static void
-inplace_add(char **args, npy_intp *dimensions, npy_intp *steps, void *data)
+inplace_add(char **args, npy_intp const *dimensions, npy_intp const *steps, void *data)
{
npy_intp i;
npy_intp n = dimensions[0];
@@ -39,7 +39,6 @@ static char types[2] = {NPY_LONG, NPY_LONG};
static void *data[1] = {NULL};
-#if defined(NPY_PY3K)
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
"_operand_flag_tests",
@@ -52,22 +51,12 @@ static struct PyModuleDef moduledef = {
NULL
};
-#define RETVAL m
PyMODINIT_FUNC PyInit__operand_flag_tests(void)
{
-#else
-#define RETVAL
-PyMODINIT_FUNC init_operand_flag_tests(void)
-{
-#endif
PyObject *m = NULL;
PyObject *ufunc;
-#if defined(NPY_PY3K)
m = PyModule_Create(&moduledef);
-#else
- m = Py_InitModule("_operand_flag_tests", TestMethods);
-#endif
if (m == NULL) {
goto fail;
}
@@ -87,19 +76,16 @@ PyMODINIT_FUNC init_operand_flag_tests(void)
((PyUFuncObject*)ufunc)->iter_flags = NPY_ITER_REDUCE_OK;
PyModule_AddObject(m, "inplace_add", (PyObject*)ufunc);
- return RETVAL;
+ return m;
fail:
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"cannot load _operand_flag_tests module.");
}
-#if defined(NPY_PY3K)
if (m) {
Py_DECREF(m);
m = NULL;
}
-#endif
- return RETVAL;
-
+ return m;
}
diff --git a/numpy/core/src/umath/_rational_tests.c.src b/numpy/core/src/umath/_rational_tests.c.src
index 615e395c7..651019a84 100644
--- a/numpy/core/src/umath/_rational_tests.c.src
+++ b/numpy/core/src/umath/_rational_tests.c.src
@@ -609,9 +609,6 @@ static PyNumberMethods pyrational_as_number = {
pyrational_add, /* nb_add */
pyrational_subtract, /* nb_subtract */
pyrational_multiply, /* nb_multiply */
-#if PY_MAJOR_VERSION < 3
- pyrational_divide, /* nb_divide */
-#endif
pyrational_remainder, /* nb_remainder */
0, /* nb_divmod */
0, /* nb_power */
@@ -625,27 +622,13 @@ static PyNumberMethods pyrational_as_number = {
0, /* nb_and */
0, /* nb_xor */
0, /* nb_or */
-#if PY_MAJOR_VERSION < 3
- 0, /* nb_coerce */
-#endif
pyrational_int, /* nb_int */
-#if PY_MAJOR_VERSION < 3
- pyrational_int, /* nb_long */
-#else
0, /* reserved */
-#endif
pyrational_float, /* nb_float */
-#if PY_MAJOR_VERSION < 3
- 0, /* nb_oct */
- 0, /* nb_hex */
-#endif
0, /* nb_inplace_add */
0, /* nb_inplace_subtract */
0, /* nb_inplace_multiply */
-#if PY_MAJOR_VERSION < 3
- 0, /* nb_inplace_divide */
-#endif
0, /* nb_inplace_remainder */
0, /* nb_inplace_power */
0, /* nb_inplace_lshift */
@@ -678,12 +661,7 @@ static PyGetSetDef pyrational_getset[] = {
};
static PyTypeObject PyRational_Type = {
-#if defined(NPY_PY3K)
PyVarObject_HEAD_INIT(NULL, 0)
-#else
- PyObject_HEAD_INIT(NULL)
- 0, /* ob_size */
-#endif
"rational", /* tp_name */
sizeof(PyRational), /* tp_basicsize */
0, /* tp_itemsize */
@@ -691,11 +669,7 @@ static PyTypeObject PyRational_Type = {
0, /* tp_print */
0, /* tp_getattr */
0, /* tp_setattr */
-#if defined(NPY_PY3K)
0, /* tp_reserved */
-#else
- 0, /* tp_compare */
-#endif
pyrational_repr, /* tp_repr */
&pyrational_as_number, /* tp_as_number */
0, /* tp_as_sequence */
@@ -962,8 +936,8 @@ DEFINE_CAST(npy_bool,rational,rational y = make_rational_int(x);)
DEFINE_CAST(rational,npy_bool,npy_bool y = rational_nonzero(x);)
#define BINARY_UFUNC(name,intype0,intype1,outtype,exp) \
- void name(char** args, npy_intp* dimensions, \
- npy_intp* steps, void* data) { \
+ void name(char** args, npy_intp const *dimensions, \
+ npy_intp const *steps, void* data) { \
npy_intp is0 = steps[0], is1 = steps[1], \
os = steps[2], n = *dimensions; \
char *i0 = args[0], *i1 = args[1], *o = args[2]; \
@@ -998,8 +972,8 @@ BINARY_UFUNC(gcd_ufunc,npy_int64,npy_int64,npy_int64,gcd(x,y))
BINARY_UFUNC(lcm_ufunc,npy_int64,npy_int64,npy_int64,lcm(x,y))
#define UNARY_UFUNC(name,type,exp) \
- void rational_ufunc_##name(char** args, npy_intp* dimensions, \
- npy_intp* steps, void* data) { \
+ void rational_ufunc_##name(char** args, npy_intp const *dimensions, \
+ npy_intp const *steps, void* data) { \
npy_intp is = steps[0], os = steps[1], n = *dimensions; \
char *i = args[0], *o = args[1]; \
int k; \
@@ -1022,7 +996,7 @@ UNARY_UFUNC(numerator,npy_int64,x.n)
UNARY_UFUNC(denominator,npy_int64,d(x))
static NPY_INLINE void
-rational_matrix_multiply(char **args, npy_intp *dimensions, npy_intp *steps)
+rational_matrix_multiply(char **args, npy_intp const *dimensions, npy_intp const *steps)
{
/* pointers to data for input and output arrays */
char *ip1 = args[0];
@@ -1067,8 +1041,8 @@ rational_matrix_multiply(char **args, npy_intp *dimensions, npy_intp *steps)
static void
-rational_gufunc_matrix_multiply(char **args, npy_intp *dimensions,
- npy_intp *steps, void *NPY_UNUSED(func))
+rational_gufunc_matrix_multiply(char **args, npy_intp const *dimensions,
+ npy_intp const *steps, void *NPY_UNUSED(func))
{
/* outer dimensions counter */
npy_intp N_;
@@ -1092,8 +1066,8 @@ rational_gufunc_matrix_multiply(char **args, npy_intp *dimensions,
static void
-rational_ufunc_test_add(char** args, npy_intp* dimensions,
- npy_intp* steps, void* data) {
+rational_ufunc_test_add(char** args, npy_intp const *dimensions,
+ npy_intp const *steps, void* data) {
npy_intp is0 = steps[0], is1 = steps[1], os = steps[2], n = *dimensions;
char *i0 = args[0], *i1 = args[1], *o = args[2];
int k;
@@ -1108,8 +1082,8 @@ rational_ufunc_test_add(char** args, npy_intp* dimensions,
static void
-rational_ufunc_test_add_rationals(char** args, npy_intp* dimensions,
- npy_intp* steps, void* data) {
+rational_ufunc_test_add_rationals(char** args, npy_intp const *dimensions,
+ npy_intp const *steps, void* data) {
npy_intp is0 = steps[0], is1 = steps[1], os = steps[2], n = *dimensions;
char *i0 = args[0], *i1 = args[1], *o = args[2];
int k;
@@ -1126,7 +1100,6 @@ PyMethodDef module_methods[] = {
{0} /* sentinel */
};
-#if defined(NPY_PY3K)
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
"_rational_tests",
@@ -1138,16 +1111,8 @@ static struct PyModuleDef moduledef = {
NULL,
NULL
};
-#endif
-#if defined(NPY_PY3K)
-#define RETVAL m
PyMODINIT_FUNC PyInit__rational_tests(void) {
-#else
-#define RETVAL
-PyMODINIT_FUNC init_rational_tests(void) {
-#endif
-
PyObject *m = NULL;
PyObject* numpy_str;
PyObject* numpy;
@@ -1292,11 +1257,7 @@ PyMODINIT_FUNC init_rational_tests(void) {
REGISTER_UFUNC_UNARY(sign)
/* Create module */
-#if defined(NPY_PY3K)
m = PyModule_Create(&moduledef);
-#else
- m = Py_InitModule("_rational_tests", module_methods);
-#endif
if (!m) {
goto fail;
@@ -1392,18 +1353,16 @@ PyMODINIT_FUNC init_rational_tests(void) {
GCD_LCM_UFUNC(gcd,NPY_INT64,"greatest common denominator of two integers");
GCD_LCM_UFUNC(lcm,NPY_INT64,"least common multiple of two integers");
- return RETVAL;
+ return m;
fail:
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"cannot load _rational_tests module.");
}
-#if defined(NPY_PY3K)
if (m) {
Py_DECREF(m);
m = NULL;
}
-#endif
- return RETVAL;
+ return m;
}
diff --git a/numpy/core/src/umath/_struct_ufunc_tests.c.src b/numpy/core/src/umath/_struct_ufunc_tests.c.src
index 3eaac73e1..d602656c8 100644
--- a/numpy/core/src/umath/_struct_ufunc_tests.c.src
+++ b/numpy/core/src/umath/_struct_ufunc_tests.c.src
@@ -17,8 +17,10 @@
* docs.python.org .
*/
-static void add_uint64_triplet(char **args, npy_intp *dimensions,
- npy_intp* steps, void* data)
+static void add_uint64_triplet(char **args,
+ npy_intp const *dimensions,
+ npy_intp const* steps,
+ void* data)
{
npy_intp i;
npy_intp is1=steps[0];
@@ -100,7 +102,6 @@ static PyMethodDef StructUfuncTestMethods[] = {
{NULL, NULL, 0, NULL}
};
-#if defined(NPY_PY3K)
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
"_struct_ufunc_tests",
@@ -112,31 +113,18 @@ static struct PyModuleDef moduledef = {
NULL,
NULL
};
-#endif
-#if defined(NPY_PY3K)
PyMODINIT_FUNC PyInit__struct_ufunc_tests(void)
-#else
-PyMODINIT_FUNC init_struct_ufunc_tests(void)
-#endif
{
PyObject *m, *add_triplet, *d;
PyObject *dtype_dict;
PyArray_Descr *dtype;
PyArray_Descr *dtypes[3];
-#if defined(NPY_PY3K)
m = PyModule_Create(&moduledef);
-#else
- m = Py_InitModule("_struct_ufunc_tests", StructUfuncTestMethods);
-#endif
if (m == NULL) {
-#if defined(NPY_PY3K)
return NULL;
-#else
- return;
-#endif
}
import_array();
@@ -166,7 +154,5 @@ PyMODINIT_FUNC init_struct_ufunc_tests(void)
PyDict_SetItemString(d, "add_triplet", add_triplet);
Py_DECREF(add_triplet);
-#if defined(NPY_PY3K)
return m;
-#endif
}
diff --git a/numpy/core/src/umath/_umath_tests.c.src b/numpy/core/src/umath/_umath_tests.c.src
index 6c3bcce71..abc8d78c4 100644
--- a/numpy/core/src/umath/_umath_tests.c.src
+++ b/numpy/core/src/umath/_umath_tests.c.src
@@ -71,7 +71,7 @@ char *inner1d_signature = "(i),(i)->()";
*/
static void
-@TYPE@_inner1d(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_inner1d(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
INIT_OUTER_LOOP_3
npy_intp di = dimensions[0];
@@ -106,7 +106,7 @@ char *innerwt_signature = "(i),(i),(i)->()";
*/
static void
-@TYPE@_innerwt(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_innerwt(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
INIT_OUTER_LOOP_4
npy_intp di = dimensions[0];
@@ -143,7 +143,7 @@ char *matmul_signature = "(m?,n),(n,p?)->(m?,p?)";
*/
static void
-@TYPE@_matrix_multiply(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_matrix_multiply(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
/* no BLAS is available */
INIT_OUTER_LOOP_3
@@ -212,7 +212,7 @@ char *cross1d_signature = "(3),(3)->(3)";
* out[n, 2] = in1[n, 0]*in2[n, 1] - in1[n, 1]*in2[n, 0]
*/
static void
-@TYPE@_cross1d(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_cross1d(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
INIT_OUTER_LOOP_3
npy_intp is1=steps[0], is2=steps[1], os = steps[2];
@@ -252,7 +252,7 @@ char *euclidean_pdist_signature = "(n,d)->(p)";
*/
static void
-@TYPE@_euclidean_pdist(char **args, npy_intp *dimensions, npy_intp *steps,
+@TYPE@_euclidean_pdist(char **args, npy_intp const *dimensions, npy_intp const *steps,
void *NPY_UNUSED(func))
{
INIT_OUTER_LOOP_2
@@ -308,7 +308,7 @@ char *cumsum_signature = "(i)->(i)";
*/
static void
-@TYPE@_cumsum(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_cumsum(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
INIT_OUTER_LOOP_2
npy_intp di = dimensions[0];
@@ -586,7 +586,6 @@ static PyMethodDef UMath_TestsMethods[] = {
{NULL, NULL, 0, NULL} /* Sentinel */
};
-#if defined(NPY_PY3K)
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
"_umath_tests",
@@ -598,31 +597,26 @@ static struct PyModuleDef moduledef = {
NULL,
NULL
};
-#endif
/* Initialization function for the module */
-#if defined(NPY_PY3K)
-#define RETVAL(x) x
PyMODINIT_FUNC PyInit__umath_tests(void) {
-#else
-#define RETVAL(x)
-PyMODINIT_FUNC init_umath_tests(void) {
-#endif
PyObject *m;
PyObject *d;
PyObject *version;
-#if defined(NPY_PY3K)
m = PyModule_Create(&moduledef);
-#else
- m = Py_InitModule("_umath_tests", UMath_TestsMethods);
-#endif
if (m == NULL) {
- return RETVAL(NULL);
+ return NULL;
}
import_array();
+ if (PyErr_Occurred()) {
+ return NULL;
+ }
import_ufunc();
+ if (PyErr_Occurred()) {
+ return NULL;
+ }
d = PyModule_GetDict(m);
@@ -636,8 +630,8 @@ PyMODINIT_FUNC init_umath_tests(void) {
PyErr_Print();
PyErr_SetString(PyExc_RuntimeError,
"cannot load _umath_tests module.");
- return RETVAL(NULL);
+ return NULL;
}
- return RETVAL(m);
+ return m;
}
diff --git a/numpy/core/src/umath/clip.c.src b/numpy/core/src/umath/clip.c.src
index 30fa3d2b3..9c4bac2d1 100644
--- a/numpy/core/src/umath/clip.c.src
+++ b/numpy/core/src/umath/clip.c.src
@@ -79,7 +79,7 @@
_NPY_@name@_MIN(_NPY_@name@_MAX((x), (min)), (max))
NPY_NO_EXPORT void
-@name@_clip(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@name@_clip(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
if (steps[1] == 0 && steps[2] == 0) {
/* min and max are constant throughout the loop, the most common case */
diff --git a/numpy/core/src/umath/clip.h.src b/numpy/core/src/umath/clip.h.src
index d77971ad7..f16856cdf 100644
--- a/numpy/core/src/umath/clip.h.src
+++ b/numpy/core/src/umath/clip.h.src
@@ -12,7 +12,7 @@
* DATETIME, TIMEDELTA#
*/
NPY_NO_EXPORT void
-@name@_clip(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+@name@_clip(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
/**end repeat**/
#endif
diff --git a/numpy/core/src/umath/cpuid.c b/numpy/core/src/umath/cpuid.c
deleted file mode 100644
index 72c6493e8..000000000
--- a/numpy/core/src/umath/cpuid.c
+++ /dev/null
@@ -1,97 +0,0 @@
-#define _UMATHMODULE
-#define _MULTIARRAYMODULE
-#define NPY_NO_DEPRECATED_API NPY_API_VERSION
-
-#include <Python.h>
-
-#include "npy_config.h"
-
-#include "cpuid.h"
-
-#define XCR_XFEATURE_ENABLED_MASK 0x0
-#define XSTATE_SSE 0x2
-#define XSTATE_YMM 0x4
-#define XSTATE_ZMM 0x70
-
-/*
- * verify the OS supports avx instructions
- * it can be disabled in some OS, e.g. with the nosavex boot option of linux
- */
-static NPY_INLINE
-int os_avx_support(void)
-{
-#if HAVE_XGETBV
- /*
- * use bytes for xgetbv to avoid issues with compiler not knowing the
- * instruction
- */
- unsigned int eax, edx;
- unsigned int ecx = XCR_XFEATURE_ENABLED_MASK;
- __asm__("xgetbv" : "=a" (eax), "=d" (edx) : "c" (ecx));
- return (eax & (XSTATE_SSE | XSTATE_YMM)) == (XSTATE_SSE | XSTATE_YMM);
-#else
- return 0;
-#endif
-}
-
-static NPY_INLINE
-int os_avx512_support(void)
-{
-#if HAVE_XGETBV
- unsigned int eax, edx;
- unsigned int ecx = XCR_XFEATURE_ENABLED_MASK;
- unsigned int xcr0 = XSTATE_ZMM | XSTATE_YMM | XSTATE_SSE;
- __asm__("xgetbv" : "=a" (eax), "=d" (edx) : "c" (ecx));
- return (eax & xcr0) == xcr0;
-#else
- return 0;
-#endif
-}
-
-static NPY_INLINE
-int cpu_supports_fma(void)
-{
-#ifdef __x86_64__
- unsigned int feature = 0x01;
- unsigned int a, b, c, d;
- __asm__ volatile (
- "cpuid" "\n\t"
- : "=a" (a), "=b" (b), "=c" (c), "=d" (d)
- : "a" (feature));
- /*
- * FMA is the 12th bit of ECX
- */
- return (c >> 12) & 1;
-#else
- return 0;
-#endif
-}
-
-/*
- * Primitive cpu feature detect function
- * Currently only supports checking for avx on gcc compatible compilers.
- */
-NPY_NO_EXPORT int
-npy_cpu_supports(const char * feature)
-{
-#ifdef HAVE___BUILTIN_CPU_SUPPORTS
- if (strcmp(feature, "avx512f") == 0) {
-#ifdef HAVE___BUILTIN_CPU_SUPPORTS_AVX512F
- return __builtin_cpu_supports("avx512f") && os_avx512_support();
-#else
- return 0;
-#endif
- }
- else if (strcmp(feature, "fma") == 0) {
- return cpu_supports_fma() && __builtin_cpu_supports("avx2") && os_avx_support();
- }
- else if (strcmp(feature, "avx2") == 0) {
- return __builtin_cpu_supports("avx2") && os_avx_support();
- }
- else if (strcmp(feature, "avx") == 0) {
- return __builtin_cpu_supports("avx") && os_avx_support();
- }
-#endif
-
- return 0;
-}
diff --git a/numpy/core/src/umath/cpuid.h b/numpy/core/src/umath/cpuid.h
deleted file mode 100644
index 33702ed41..000000000
--- a/numpy/core/src/umath/cpuid.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef _NPY_PRIVATE__CPUID_H_
-#define _NPY_PRIVATE__CPUID_H_
-
-#include <numpy/ndarraytypes.h> /* for NPY_NO_EXPORT */
-
-NPY_NO_EXPORT int
-npy_cpu_supports(const char * feature);
-
-#endif
diff --git a/numpy/core/src/umath/extobj.c b/numpy/core/src/umath/extobj.c
index aea1815e8..3404a0c6a 100644
--- a/numpy/core/src/umath/extobj.c
+++ b/numpy/core/src/umath/extobj.c
@@ -165,7 +165,7 @@ get_global_ext_obj(void)
if (thedict == NULL) {
thedict = PyEval_GetBuiltins();
}
- ref = PyDict_GetItem(thedict, npy_um_str_pyvals_name);
+ ref = PyDict_GetItemWithError(thedict, npy_um_str_pyvals_name);
#if USE_USE_DEFAULTS==1
}
#endif
@@ -290,6 +290,9 @@ _check_ufunc_fperr(int errmask, PyObject *extobj, const char *ufunc_name) {
/* Get error object globals */
if (extobj == NULL) {
extobj = get_global_ext_obj();
+ if (extobj == NULL && PyErr_Occurred()) {
+ return -1;
+ }
}
if (_extract_pyvals(extobj, ufunc_name,
NULL, NULL, &errobj) < 0) {
@@ -311,6 +314,9 @@ _get_bufsize_errmask(PyObject * extobj, const char *ufunc_name,
/* Get the buffersize and errormask */
if (extobj == NULL) {
extobj = get_global_ext_obj();
+ if (extobj == NULL && PyErr_Occurred()) {
+ return -1;
+ }
}
if (_extract_pyvals(extobj, ufunc_name,
buffersize, errormask, NULL) < 0) {
diff --git a/numpy/core/src/umath/fast_loop_macros.h b/numpy/core/src/umath/fast_loop_macros.h
index ae6d69a3e..e6789e1d6 100644
--- a/numpy/core/src/umath/fast_loop_macros.h
+++ b/numpy/core/src/umath/fast_loop_macros.h
@@ -4,8 +4,8 @@
* These expect to have access to the arguments of a typical ufunc loop,
*
* char **args
- * npy_intp *dimensions
- * npy_intp *steps
+ * npy_intp const *dimensions
+ * npy_intp const *steps
*/
#ifndef _NPY_UMATH_FAST_LOOP_MACROS_H_
#define _NPY_UMATH_FAST_LOOP_MACROS_H_
diff --git a/numpy/core/src/umath/funcs.inc.src b/numpy/core/src/umath/funcs.inc.src
index 10ed66e50..273779ee8 100644
--- a/numpy/core/src/umath/funcs.inc.src
+++ b/numpy/core/src/umath/funcs.inc.src
@@ -38,11 +38,7 @@ Py_reciprocal(PyObject *o)
if (!one) {
return NULL;
}
-#if defined(NPY_PY3K)
result = PyNumber_TrueDivide(one, o);
-#else
- result = PyNumber_Divide(one, o);
-#endif
Py_DECREF(one);
return result;
}
@@ -197,8 +193,7 @@ npy_ObjectGCD(PyObject *i1, PyObject *i2)
{
PyObject *gcd = NULL;
- /* use math.gcd if available, and valid on the provided types */
-#if PY_VERSION_HEX >= 0x03050000
+ /* use math.gcd if valid on the provided types */
{
static PyObject *math_gcd_func = NULL;
@@ -213,7 +208,6 @@ npy_ObjectGCD(PyObject *i1, PyObject *i2)
/* silence errors, and fall back on pure-python gcd */
PyErr_Clear();
}
-#endif
/* otherwise, use our internal one, written in python */
{
@@ -360,9 +354,9 @@ nc_exp2@c@(@ctype@ *x, @ctype@ *r)
static void
nc_expm1@c@(@ctype@ *x, @ctype@ *r)
{
- @ftype@ a = npy_exp@c@(x->real);
- r->real = a*npy_cos@c@(x->imag) - 1.0@c@;
- r->imag = a*npy_sin@c@(x->imag);
+ @ftype@ a = npy_sin@c@(x->imag / 2);
+ r->real = npy_expm1@c@(x->real) * npy_cos@c@(x->imag) - 2 * a * a;
+ r->imag = npy_exp@c@(x->real) * npy_sin@c@(x->imag);
return;
}
diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src
index d948e25bb..9b43824cb 100644
--- a/numpy/core/src/umath/loops.c.src
+++ b/numpy/core/src/umath/loops.c.src
@@ -54,210 +54,123 @@
** GENERIC FLOAT LOOPS **
*****************************************************************************/
+/* direct loops using a suitable callback */
-typedef float halfUnaryFunc(npy_half x);
-typedef float floatUnaryFunc(float x);
-typedef double doubleUnaryFunc(double x);
-typedef npy_longdouble longdoubleUnaryFunc(npy_longdouble x);
-typedef npy_half halfBinaryFunc(npy_half x, npy_half y);
-typedef float floatBinaryFunc(float x, float y);
-typedef double doubleBinaryFunc(double x, double y);
-typedef npy_longdouble longdoubleBinaryFunc(npy_longdouble x, npy_longdouble y);
-
+/**begin repeat
+ * #c = e, f, d, g#
+ * #type = npy_half, npy_float, npy_double, npy_longdouble#
+ **/
/*UFUNC_API*/
NPY_NO_EXPORT void
-PyUFunc_e_e(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
+PyUFunc_@c@_@c@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *func)
{
- halfUnaryFunc *f = (halfUnaryFunc *)func;
+ typedef @type@ func_type(@type@);
+ func_type *f = (func_type *)func;
UNARY_LOOP {
- const npy_half in1 = *(npy_half *)ip1;
- *(npy_half *)op1 = f(in1);
+ const @type@ in1 = *(@type@ *)ip1;
+ *(@type@ *)op1 = f(in1);
}
}
/*UFUNC_API*/
NPY_NO_EXPORT void
-PyUFunc_e_e_As_f_f(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
+PyUFunc_@c@@c@_@c@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *func)
{
- floatUnaryFunc *f = (floatUnaryFunc *)func;
- UNARY_LOOP {
- const float in1 = npy_half_to_float(*(npy_half *)ip1);
- *(npy_half *)op1 = npy_float_to_half(f(in1));
+ typedef @type@ func_type(@type@, @type@);
+ func_type *f = (func_type *)func;
+ BINARY_LOOP {
+ @type@ in1 = *(@type@ *)ip1;
+ @type@ in2 = *(@type@ *)ip2;
+ *(@type@ *)op1 = f(in1, in2);
}
}
-/*UFUNC_API*/
-NPY_NO_EXPORT void
-PyUFunc_e_e_As_d_d(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
-{
- doubleUnaryFunc *f = (doubleUnaryFunc *)func;
- UNARY_LOOP {
- const double in1 = npy_half_to_double(*(npy_half *)ip1);
- *(npy_half *)op1 = npy_double_to_half(f(in1));
- }
-}
+/**end repeat**/
-/*UFUNC_API*/
-NPY_NO_EXPORT void
-PyUFunc_f_f(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
-{
- floatUnaryFunc *f = (floatUnaryFunc *)func;
- UNARY_LOOP {
- const float in1 = *(float *)ip1;
- *(float *)op1 = f(in1);
- }
-}
+/* indirect loops with casting */
+/**begin repeat
+ * #c1 = e, e, f#
+ * #type1 = npy_half, npy_half, npy_float#
+ * #c2 = f, d, d#
+ * #type2 = npy_float, npy_double, npy_double#
+ *
+ * #conv12 = npy_half_to_float, npy_half_to_double, (double)#
+ * #conv21 = npy_float_to_half, npy_double_to_half, (float)#
+ **/
/*UFUNC_API*/
NPY_NO_EXPORT void
-PyUFunc_f_f_As_d_d(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
+PyUFunc_@c1@_@c1@_As_@c2@_@c2@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *func)
{
- doubleUnaryFunc *f = (doubleUnaryFunc *)func;
+ typedef @type2@ func_type(@type2@);
+ func_type *f = (func_type *)func;
UNARY_LOOP {
- const float in1 = *(float *)ip1;
- *(float *)op1 = (float)f((double)in1);
- }
-}
-
-/*UFUNC_API*/
-NPY_NO_EXPORT void
-PyUFunc_ee_e(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
-{
- halfBinaryFunc *f = (halfBinaryFunc *)func;
- BINARY_LOOP {
- npy_half in1 = *(npy_half *)ip1;
- npy_half in2 = *(npy_half *)ip2;
- *(npy_half *)op1 = f(in1, in2);
- }
-}
-
-/*UFUNC_API*/
-NPY_NO_EXPORT void
-PyUFunc_ee_e_As_ff_f(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
-{
- floatBinaryFunc *f = (floatBinaryFunc *)func;
- BINARY_LOOP {
- float in1 = npy_half_to_float(*(npy_half *)ip1);
- float in2 = npy_half_to_float(*(npy_half *)ip2);
- *(npy_half *)op1 = npy_float_to_half(f(in1, in2));
+ const @type2@ in1 = @conv12@(*(@type1@ *)ip1);
+ *(@type1@ *)op1 = @conv21@(f(in1));
}
}
-
/*UFUNC_API*/
NPY_NO_EXPORT void
-PyUFunc_ee_e_As_dd_d(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
+PyUFunc_@c1@@c1@_@c1@_As_@c2@@c2@_@c2@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *func)
{
- doubleBinaryFunc *f = (doubleBinaryFunc *)func;
+ typedef @type2@ func_type(@type2@, @type2@);
+ func_type *f = (func_type *)func;
BINARY_LOOP {
- double in1 = npy_half_to_double(*(npy_half *)ip1);
- double in2 = npy_half_to_double(*(npy_half *)ip2);
- *(npy_half *)op1 = npy_double_to_half(f(in1, in2));
+ const @type2@ in1 = @conv12@(*(@type1@ *)ip1);
+ const @type2@ in2 = @conv12@(*(@type1@ *)ip2);
+ *(@type1@ *)op1 = @conv21@(f(in1, in2));
}
}
-/*UFUNC_API*/
-NPY_NO_EXPORT void
-PyUFunc_ff_f(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
-{
- floatBinaryFunc *f = (floatBinaryFunc *)func;
- BINARY_LOOP {
- float in1 = *(float *)ip1;
- float in2 = *(float *)ip2;
- *(float *)op1 = f(in1, in2);
- }
-}
-
-/*UFUNC_API*/
-NPY_NO_EXPORT void
-PyUFunc_ff_f_As_dd_d(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
-{
- doubleBinaryFunc *f = (doubleBinaryFunc *)func;
- BINARY_LOOP {
- float in1 = *(float *)ip1;
- float in2 = *(float *)ip2;
- *(float *)op1 = (double)f((double)in1, (double)in2);
- }
-}
+/**end repeat**/
-/*UFUNC_API*/
-NPY_NO_EXPORT void
-PyUFunc_d_d(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
-{
- doubleUnaryFunc *f = (doubleUnaryFunc *)func;
- UNARY_LOOP {
- double in1 = *(double *)ip1;
- *(double *)op1 = f(in1);
- }
-}
+/******************************************************************************
+ ** GENERIC COMPLEX LOOPS **
+ *****************************************************************************/
-/*UFUNC_API*/
-NPY_NO_EXPORT void
-PyUFunc_dd_d(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
-{
- doubleBinaryFunc *f = (doubleBinaryFunc *)func;
- BINARY_LOOP {
- double in1 = *(double *)ip1;
- double in2 = *(double *)ip2;
- *(double *)op1 = f(in1, in2);
- }
-}
+/* direct loops using a suitable callback */
+/**begin repeat
+ * #c = F, D, G#
+ * #type = npy_cfloat, npy_cdouble, npy_clongdouble#
+ **/
/*UFUNC_API*/
NPY_NO_EXPORT void
-PyUFunc_g_g(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
+PyUFunc_@c@_@c@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *func)
{
- longdoubleUnaryFunc *f = (longdoubleUnaryFunc *)func;
+ typedef void func_type(@type@ *, @type@ *);
+ func_type *f = (func_type *)func;
UNARY_LOOP {
- npy_longdouble in1 = *(npy_longdouble *)ip1;
- *(npy_longdouble *)op1 = f(in1);
+ @type@ in1 = *(@type@ *)ip1;
+ @type@ *out = (@type@ *)op1;
+ f(&in1, out);
}
}
/*UFUNC_API*/
NPY_NO_EXPORT void
-PyUFunc_gg_g(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
+PyUFunc_@c@@c@_@c@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *func)
{
- longdoubleBinaryFunc *f = (longdoubleBinaryFunc *)func;
+ typedef void func_type(@type@ *, @type@ *, @type@ *);
+ func_type *f = (func_type *)func;
BINARY_LOOP {
- npy_longdouble in1 = *(npy_longdouble *)ip1;
- npy_longdouble in2 = *(npy_longdouble *)ip2;
- *(npy_longdouble *)op1 = f(in1, in2);
+ @type@ in1 = *(@type@ *)ip1;
+ @type@ in2 = *(@type@ *)ip2;
+ @type@ *out = (@type@ *)op1;
+ f(&in1, &in2, out);
}
}
+/**end repeat**/
-
-/******************************************************************************
- ** GENERIC COMPLEX LOOPS **
- *****************************************************************************/
-
-
-typedef void cdoubleUnaryFunc(npy_cdouble *x, npy_cdouble *r);
-typedef void cfloatUnaryFunc(npy_cfloat *x, npy_cfloat *r);
-typedef void clongdoubleUnaryFunc(npy_clongdouble *x, npy_clongdouble *r);
-typedef void cdoubleBinaryFunc(npy_cdouble *x, npy_cdouble *y, npy_cdouble *r);
-typedef void cfloatBinaryFunc(npy_cfloat *x, npy_cfloat *y, npy_cfloat *r);
-typedef void clongdoubleBinaryFunc(npy_clongdouble *x, npy_clongdouble *y,
- npy_clongdouble *r);
-
+/* indirect loops with casting */
/*UFUNC_API*/
NPY_NO_EXPORT void
-PyUFunc_F_F(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
+PyUFunc_F_F_As_D_D(char **args, npy_intp const *dimensions, npy_intp const *steps, void *func)
{
- cfloatUnaryFunc *f = (cfloatUnaryFunc *)func;
- UNARY_LOOP {
- npy_cfloat in1 = *(npy_cfloat *)ip1;
- npy_cfloat *out = (npy_cfloat *)op1;
- f(&in1, out);
- }
-}
-
-/*UFUNC_API*/
-NPY_NO_EXPORT void
-PyUFunc_F_F_As_D_D(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
-{
- cdoubleUnaryFunc *f = (cdoubleUnaryFunc *)func;
+ typedef void func_type(npy_cdouble *, npy_cdouble *);
+ func_type *f = (func_type *)func;
UNARY_LOOP {
npy_cdouble tmp, out;
tmp.real = (double)((float *)ip1)[0];
@@ -270,22 +183,10 @@ PyUFunc_F_F_As_D_D(char **args, npy_intp *dimensions, npy_intp *steps, void *fun
/*UFUNC_API*/
NPY_NO_EXPORT void
-PyUFunc_FF_F(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
-{
- cfloatBinaryFunc *f = (cfloatBinaryFunc *)func;
- BINARY_LOOP {
- npy_cfloat in1 = *(npy_cfloat *)ip1;
- npy_cfloat in2 = *(npy_cfloat *)ip2;
- npy_cfloat *out = (npy_cfloat *)op1;
- f(&in1, &in2, out);
- }
-}
-
-/*UFUNC_API*/
-NPY_NO_EXPORT void
-PyUFunc_FF_F_As_DD_D(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
+PyUFunc_FF_F_As_DD_D(char **args, npy_intp const *dimensions, npy_intp const *steps, void *func)
{
- cdoubleBinaryFunc *f = (cdoubleBinaryFunc *)func;
+ typedef void func_type(npy_cdouble *, npy_cdouble *, npy_cdouble *);
+ func_type *f = (func_type *)func;
BINARY_LOOP {
npy_cdouble tmp1, tmp2, out;
tmp1.real = (double)((float *)ip1)[0];
@@ -298,56 +199,6 @@ PyUFunc_FF_F_As_DD_D(char **args, npy_intp *dimensions, npy_intp *steps, void *f
}
}
-/*UFUNC_API*/
-NPY_NO_EXPORT void
-PyUFunc_D_D(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
-{
- cdoubleUnaryFunc *f = (cdoubleUnaryFunc *)func;
- UNARY_LOOP {
- npy_cdouble in1 = *(npy_cdouble *)ip1;
- npy_cdouble *out = (npy_cdouble *)op1;
- f(&in1, out);
- }
-}
-
-/*UFUNC_API*/
-NPY_NO_EXPORT void
-PyUFunc_DD_D(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
-{
- cdoubleBinaryFunc *f = (cdoubleBinaryFunc *)func;
- BINARY_LOOP {
- npy_cdouble in1 = *(npy_cdouble *)ip1;
- npy_cdouble in2 = *(npy_cdouble *)ip2;
- npy_cdouble *out = (npy_cdouble *)op1;
- f(&in1, &in2, out);
- }
-}
-
-/*UFUNC_API*/
-NPY_NO_EXPORT void
-PyUFunc_G_G(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
-{
- clongdoubleUnaryFunc *f = (clongdoubleUnaryFunc *)func;
- UNARY_LOOP {
- npy_clongdouble in1 = *(npy_clongdouble *)ip1;
- npy_clongdouble *out = (npy_clongdouble *)op1;
- f(&in1, out);
- }
-}
-
-/*UFUNC_API*/
-NPY_NO_EXPORT void
-PyUFunc_GG_G(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
-{
- clongdoubleBinaryFunc *f = (clongdoubleBinaryFunc *)func;
- BINARY_LOOP {
- npy_clongdouble in1 = *(npy_clongdouble *)ip1;
- npy_clongdouble in2 = *(npy_clongdouble *)ip2;
- npy_clongdouble *out = (npy_clongdouble *)op1;
- f(&in1, &in2, out);
- }
-}
-
/******************************************************************************
** GENERIC OBJECT lOOPS **
@@ -355,7 +206,7 @@ PyUFunc_GG_G(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
/*UFUNC_API*/
NPY_NO_EXPORT void
-PyUFunc_O_O(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
+PyUFunc_O_O(char **args, npy_intp const *dimensions, npy_intp const *steps, void *func)
{
unaryfunc f = (unaryfunc)func;
UNARY_LOOP {
@@ -372,7 +223,7 @@ PyUFunc_O_O(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
/*UFUNC_API*/
NPY_NO_EXPORT void
-PyUFunc_O_O_method(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
+PyUFunc_O_O_method(char **args, npy_intp const *dimensions, npy_intp const *steps, void *func)
{
char *meth = (char *)func;
PyObject *tup = PyTuple_New(0);
@@ -384,7 +235,11 @@ PyUFunc_O_O_method(char **args, npy_intp *dimensions, npy_intp *steps, void *fun
PyObject **out = (PyObject **)op1;
PyObject *ret, *func;
func = PyObject_GetAttrString(in1 ? in1 : Py_None, meth);
- if (func == NULL || !PyCallable_Check(func)) {
+ if (func != NULL && !PyCallable_Check(func)) {
+ Py_DECREF(func);
+ func = NULL;
+ }
+ if (func == NULL) {
PyObject *exc, *val, *tb;
PyTypeObject *type = in1 ? Py_TYPE(in1) : Py_TYPE(Py_None);
PyErr_Fetch(&exc, &val, &tb);
@@ -411,7 +266,7 @@ PyUFunc_O_O_method(char **args, npy_intp *dimensions, npy_intp *steps, void *fun
/*UFUNC_API*/
NPY_NO_EXPORT void
-PyUFunc_OO_O(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
+PyUFunc_OO_O(char **args, npy_intp const *dimensions, npy_intp const *steps, void *func)
{
binaryfunc f = (binaryfunc)func;
BINARY_LOOP {
@@ -428,7 +283,7 @@ PyUFunc_OO_O(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
}
NPY_NO_EXPORT void
-PyUFunc_OOO_O(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
+PyUFunc_OOO_O(char **args, npy_intp const *dimensions, npy_intp const *steps, void *func)
{
ternaryfunc f = (ternaryfunc)func;
TERNARY_LOOP {
@@ -451,7 +306,7 @@ PyUFunc_OOO_O(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
/*UFUNC_API*/
NPY_NO_EXPORT void
-PyUFunc_OO_O_method(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
+PyUFunc_OO_O_method(char **args, npy_intp const *dimensions, npy_intp const *steps, void *func)
{
char *meth = (char *)func;
BINARY_LOOP {
@@ -475,7 +330,7 @@ PyUFunc_OO_O_method(char **args, npy_intp *dimensions, npy_intp *steps, void *fu
/*UFUNC_API*/
NPY_NO_EXPORT void
-PyUFunc_On_Om(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
+PyUFunc_On_Om(char **args, npy_intp const *dimensions, npy_intp const *steps, void *func)
{
npy_intp n = dimensions[0];
PyUFunc_PyFuncData *data = (PyUFunc_PyFuncData *)func;
@@ -556,7 +411,7 @@ PyUFunc_On_Om(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
**/
NPY_NO_EXPORT void
-BOOL_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+BOOL_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
npy_bool in1 = *((npy_bool *)ip1) != 0;
@@ -575,7 +430,7 @@ BOOL_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED
**/
NPY_NO_EXPORT void
-BOOL_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+BOOL_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
if(IS_BINARY_REDUCE) {
#ifdef NPY_HAVE_SSE2_INTRINSICS
@@ -645,7 +500,7 @@ BOOL_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED
* #OP = !=, ==#
**/
NPY_NO_EXPORT void
-BOOL_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+BOOL_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
if (run_unary_simd_@kind@_BOOL(args, dimensions, steps)) {
return;
@@ -660,7 +515,7 @@ BOOL_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED
/**end repeat**/
NPY_NO_EXPORT void
-BOOL__ones_like(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data))
+BOOL__ones_like(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data))
{
OUTPUT_LOOP {
*((npy_bool *)op1) = 1;
@@ -674,7 +529,7 @@ BOOL__ones_like(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UN
* #val = NPY_FALSE, NPY_FALSE, NPY_TRUE#
**/
NPY_NO_EXPORT void
-BOOL_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+BOOL_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
/*
* The (void)in; suppresses an unused variable warning raised by gcc and allows
@@ -707,7 +562,7 @@ BOOL_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED
#define @TYPE@_fmin @TYPE@_minimum
NPY_NO_EXPORT void
-@TYPE@__ones_like(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data))
+@TYPE@__ones_like(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data))
{
OUTPUT_LOOP {
*((@type@ *)op1) = 1;
@@ -715,7 +570,7 @@ NPY_NO_EXPORT void
}
NPY_NO_EXPORT void
-@TYPE@_positive(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_positive(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
UNARY_LOOP_FAST(@type@, @type@, *out = +in);
}
@@ -729,7 +584,7 @@ NPY_NO_EXPORT void
#if @CHK@
NPY_NO_EXPORT NPY_GCC_OPT_3 @ATTR@ void
-@TYPE@_square@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data))
+@TYPE@_square@isa@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data))
{
UNARY_LOOP_FAST(@type@, @type@, *out = in * in);
}
@@ -737,7 +592,7 @@ NPY_NO_EXPORT NPY_GCC_OPT_3 @ATTR@ void
#if @CHK@
NPY_NO_EXPORT NPY_GCC_OPT_3 @ATTR@ void
-@TYPE@_reciprocal@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data))
+@TYPE@_reciprocal@isa@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data))
{
UNARY_LOOP_FAST(@type@, @type@, *out = 1.0 / in);
}
@@ -745,7 +600,7 @@ NPY_NO_EXPORT NPY_GCC_OPT_3 @ATTR@ void
#if @CHK@
NPY_NO_EXPORT NPY_GCC_OPT_3 @ATTR@ void
-@TYPE@_conjugate@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_conjugate@isa@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
UNARY_LOOP_FAST(@type@, @type@, *out = in);
}
@@ -753,7 +608,7 @@ NPY_NO_EXPORT NPY_GCC_OPT_3 @ATTR@ void
#if @CHK@
NPY_NO_EXPORT NPY_GCC_OPT_3 @ATTR@ void
-@TYPE@_negative@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_negative@isa@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
UNARY_LOOP_FAST(@type@, @type@, *out = -in);
}
@@ -761,7 +616,7 @@ NPY_NO_EXPORT NPY_GCC_OPT_3 @ATTR@ void
#if @CHK@
NPY_NO_EXPORT NPY_GCC_OPT_3 @ATTR@ void
-@TYPE@_logical_not@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_logical_not@isa@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
UNARY_LOOP_FAST(@type@, npy_bool, *out = !in);
}
@@ -769,7 +624,7 @@ NPY_NO_EXPORT NPY_GCC_OPT_3 @ATTR@ void
#if @CHK@
NPY_NO_EXPORT NPY_GCC_OPT_3 @ATTR@ void
-@TYPE@_invert@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_invert@isa@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
UNARY_LOOP_FAST(@type@, @type@, *out = ~in);
}
@@ -783,7 +638,7 @@ NPY_NO_EXPORT NPY_GCC_OPT_3 @ATTR@ void
#if @CHK@
NPY_NO_EXPORT NPY_GCC_OPT_3 @ATTR@ void
-@TYPE@_@kind@@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_@kind@@isa@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
if (IS_BINARY_REDUCE) {
BINARY_REDUCE_LOOP(@type@) {
@@ -813,7 +668,7 @@ NPY_NO_EXPORT NPY_GCC_OPT_3 @ATTR@ void
#define UINT_left_shift_needs_clear_floatstatus
NPY_NO_EXPORT NPY_GCC_OPT_3 void
-@TYPE@_left_shift@isa@(char **args, npy_intp *dimensions, npy_intp *steps,
+@TYPE@_left_shift@isa@(char **args, npy_intp const *dimensions, npy_intp const *steps,
void *NPY_UNUSED(func))
{
BINARY_LOOP_FAST(@type@, @type@, *out = npy_lshift@c@(in1, in2));
@@ -833,7 +688,7 @@ NPY_NO_EXPORT
NPY_GCC_OPT_3
#endif
void
-@TYPE@_right_shift@isa@(char **args, npy_intp *dimensions, npy_intp *steps,
+@TYPE@_right_shift@isa@(char **args, npy_intp const *dimensions, npy_intp const *steps,
void *NPY_UNUSED(func))
{
BINARY_LOOP_FAST(@type@, @type@, *out = npy_rshift@c@(in1, in2));
@@ -848,7 +703,7 @@ void
#if @CHK@
NPY_NO_EXPORT NPY_GCC_OPT_3 @ATTR@ void
-@TYPE@_@kind@@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_@kind@@isa@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
/*
* gcc vectorization of this is not good (PR60575) but manual integer
@@ -862,7 +717,7 @@ NPY_NO_EXPORT NPY_GCC_OPT_3 @ATTR@ void
#if @CHK@
NPY_NO_EXPORT NPY_GCC_OPT_3 @ATTR@ void
-@TYPE@_logical_xor@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_logical_xor@isa@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
const int t1 = !!*(@type@ *)ip1;
@@ -880,7 +735,7 @@ NPY_NO_EXPORT NPY_GCC_OPT_3 @ATTR@ void
**/
NPY_NO_EXPORT void
-@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
if (IS_BINARY_REDUCE) {
BINARY_REDUCE_LOOP(@type@) {
@@ -901,7 +756,7 @@ NPY_NO_EXPORT void
/**end repeat1**/
NPY_NO_EXPORT void
-@TYPE@_power(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_power(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
@type@ in1 = *(@type@ *)ip1;
@@ -941,7 +796,7 @@ NPY_NO_EXPORT void
}
NPY_NO_EXPORT void
-@TYPE@_fmod(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_fmod(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
const @type@ in1 = *(@type@ *)ip1;
@@ -963,7 +818,7 @@ NPY_NO_EXPORT void
* #val = NPY_FALSE, NPY_FALSE, NPY_TRUE#
**/
NPY_NO_EXPORT void
-@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
/*
* The (void)in; suppresses an unused variable warning raised by gcc and allows
@@ -982,19 +837,19 @@ NPY_NO_EXPORT void
*/
NPY_NO_EXPORT NPY_GCC_OPT_3 void
-@TYPE@_absolute(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_absolute(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
UNARY_LOOP_FAST(@type@, @type@, *out = (in >= 0) ? in : -in);
}
NPY_NO_EXPORT NPY_GCC_OPT_3 void
-@TYPE@_sign(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_sign(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
UNARY_LOOP_FAST(@type@, @type@, *out = in > 0 ? 1 : (in < 0 ? -1 : 0));
}
NPY_NO_EXPORT void
-@TYPE@_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_divide(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
const @type@ in1 = *(@type@ *)ip1;
@@ -1020,7 +875,7 @@ NPY_NO_EXPORT void
}
NPY_NO_EXPORT void
-@TYPE@_remainder(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_remainder(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
const @type@ in1 = *(@type@ *)ip1;
@@ -1043,7 +898,7 @@ NPY_NO_EXPORT void
}
NPY_NO_EXPORT void
-@TYPE@_divmod(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_divmod(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP_TWO_OUT {
const @type@ in1 = *(@type@ *)ip1;
@@ -1074,7 +929,7 @@ NPY_NO_EXPORT void
* #kind = gcd, lcm#
**/
NPY_NO_EXPORT void
-@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
const @type@ in1 = *(@type@ *)ip1;
@@ -1093,19 +948,19 @@ NPY_NO_EXPORT void
*/
NPY_NO_EXPORT NPY_GCC_OPT_3 void
-@TYPE@_absolute(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_absolute(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
UNARY_LOOP_FAST(@type@, @type@, *out = in);
}
NPY_NO_EXPORT NPY_GCC_OPT_3 void
-@TYPE@_sign(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_sign(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
UNARY_LOOP_FAST(@type@, @type@, *out = in > 0 ? 1 : 0);
}
NPY_NO_EXPORT void
-@TYPE@_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_divide(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
const @type@ in1 = *(@type@ *)ip1;
@@ -1121,7 +976,7 @@ NPY_NO_EXPORT void
}
NPY_NO_EXPORT void
-@TYPE@_remainder(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_remainder(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
const @type@ in1 = *(@type@ *)ip1;
@@ -1137,7 +992,7 @@ NPY_NO_EXPORT void
}
NPY_NO_EXPORT void
-@TYPE@_divmod(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_divmod(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP_TWO_OUT {
const @type@ in1 = *(@type@ *)ip1;
@@ -1158,7 +1013,7 @@ NPY_NO_EXPORT void
* #kind = gcd, lcm#
**/
NPY_NO_EXPORT void
-@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
const @type@ in1 = *(@type@ *)ip1;
@@ -1177,7 +1032,7 @@ NPY_NO_EXPORT void
*/
NPY_NO_EXPORT void
-TIMEDELTA_negative(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+TIMEDELTA_negative(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
UNARY_LOOP {
const npy_timedelta in1 = *(npy_timedelta *)ip1;
@@ -1191,7 +1046,7 @@ TIMEDELTA_negative(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY
}
NPY_NO_EXPORT void
-TIMEDELTA_positive(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+TIMEDELTA_positive(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
UNARY_LOOP {
const npy_timedelta in1 = *(npy_timedelta *)ip1;
@@ -1200,7 +1055,7 @@ TIMEDELTA_positive(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY
}
NPY_NO_EXPORT void
-TIMEDELTA_absolute(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+TIMEDELTA_absolute(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
UNARY_LOOP {
const npy_timedelta in1 = *(npy_timedelta *)ip1;
@@ -1214,7 +1069,7 @@ TIMEDELTA_absolute(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY
}
NPY_NO_EXPORT void
-TIMEDELTA_sign(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+TIMEDELTA_sign(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
UNARY_LOOP {
const npy_timedelta in1 = *(npy_timedelta *)ip1;
@@ -1228,7 +1083,7 @@ TIMEDELTA_sign(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNU
*/
NPY_NO_EXPORT void
-@TYPE@_isnat(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_isnat(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
UNARY_LOOP {
const @type@ in1 = *(@type@ *)ip1;
@@ -1237,7 +1092,7 @@ NPY_NO_EXPORT void
}
NPY_NO_EXPORT void
-@TYPE@_isfinite(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_isfinite(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
UNARY_LOOP {
const @type@ in1 = *(@type@ *)ip1;
@@ -1246,7 +1101,13 @@ NPY_NO_EXPORT void
}
NPY_NO_EXPORT void
-@TYPE@__ones_like(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data))
+@TYPE@_isinf(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
+{
+ UNARY_LOOP_FAST(npy_bool, npy_bool, (void)in; *out = NPY_FALSE);
+}
+
+NPY_NO_EXPORT void
+@TYPE@__ones_like(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data))
{
OUTPUT_LOOP {
*((@type@ *)op1) = 1;
@@ -1258,7 +1119,7 @@ NPY_NO_EXPORT void
* #OP = ==, >, >=, <, <=#
*/
NPY_NO_EXPORT void
-@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
const @type@ in1 = *(@type@ *)ip1;
@@ -1271,7 +1132,7 @@ NPY_NO_EXPORT void
/**end repeat1**/
NPY_NO_EXPORT void
-@TYPE@_not_equal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_not_equal(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
const @type@ in1 = *(@type@ *)ip1;
@@ -1288,7 +1149,30 @@ NPY_NO_EXPORT void
* #OP = >, <#
**/
NPY_NO_EXPORT void
-@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
+{
+ BINARY_LOOP {
+ const @type@ in1 = *(@type@ *)ip1;
+ const @type@ in2 = *(@type@ *)ip2;
+ if (in1 == NPY_DATETIME_NAT) {
+ *((@type@ *)op1) = in1;
+ }
+ else if (in2 == NPY_DATETIME_NAT) {
+ *((@type@ *)op1) = in2;
+ }
+ else {
+ *((@type@ *)op1) = (in1 @OP@ in2) ? in1 : in2;
+ }
+ }
+}
+/**end repeat1**/
+
+/**begin repeat1
+ * #kind = fmax, fmin#
+ * #OP = >=, <=#
+ **/
+NPY_NO_EXPORT void
+@TYPE@_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
const @type@ in1 = *(@type@ *)ip1;
@@ -1300,7 +1184,7 @@ NPY_NO_EXPORT void
*((@type@ *)op1) = in1;
}
else {
- *((@type@ *)op1) = (in1 @OP@ in2) ? in1 : in2;
+ *((@type@ *)op1) = in1 @OP@ in2 ? in1 : in2;
}
}
}
@@ -1309,7 +1193,7 @@ NPY_NO_EXPORT void
/**end repeat**/
NPY_NO_EXPORT void
-DATETIME_Mm_M_add(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data))
+DATETIME_Mm_M_add(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data))
{
BINARY_LOOP {
const npy_datetime in1 = *(npy_datetime *)ip1;
@@ -1324,7 +1208,7 @@ DATETIME_Mm_M_add(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_
}
NPY_NO_EXPORT void
-DATETIME_mM_M_add(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+DATETIME_mM_M_add(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
const npy_timedelta in1 = *(npy_timedelta *)ip1;
@@ -1339,7 +1223,7 @@ DATETIME_mM_M_add(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_
}
NPY_NO_EXPORT void
-TIMEDELTA_mm_m_add(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+TIMEDELTA_mm_m_add(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
const npy_timedelta in1 = *(npy_timedelta *)ip1;
@@ -1354,7 +1238,7 @@ TIMEDELTA_mm_m_add(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY
}
NPY_NO_EXPORT void
-DATETIME_Mm_M_subtract(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+DATETIME_Mm_M_subtract(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
const npy_datetime in1 = *(npy_datetime *)ip1;
@@ -1369,7 +1253,7 @@ DATETIME_Mm_M_subtract(char **args, npy_intp *dimensions, npy_intp *steps, void
}
NPY_NO_EXPORT void
-DATETIME_MM_m_subtract(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+DATETIME_MM_m_subtract(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
const npy_datetime in1 = *(npy_datetime *)ip1;
@@ -1384,7 +1268,7 @@ DATETIME_MM_m_subtract(char **args, npy_intp *dimensions, npy_intp *steps, void
}
NPY_NO_EXPORT void
-TIMEDELTA_mm_m_subtract(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+TIMEDELTA_mm_m_subtract(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
const npy_timedelta in1 = *(npy_timedelta *)ip1;
@@ -1400,7 +1284,7 @@ TIMEDELTA_mm_m_subtract(char **args, npy_intp *dimensions, npy_intp *steps, void
/* Note: Assuming 'q' == NPY_LONGLONG */
NPY_NO_EXPORT void
-TIMEDELTA_mq_m_multiply(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+TIMEDELTA_mq_m_multiply(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
const npy_timedelta in1 = *(npy_timedelta *)ip1;
@@ -1416,7 +1300,7 @@ TIMEDELTA_mq_m_multiply(char **args, npy_intp *dimensions, npy_intp *steps, void
/* Note: Assuming 'q' == NPY_LONGLONG */
NPY_NO_EXPORT void
-TIMEDELTA_qm_m_multiply(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+TIMEDELTA_qm_m_multiply(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
const npy_int64 in1 = *(npy_int64 *)ip1;
@@ -1431,7 +1315,7 @@ TIMEDELTA_qm_m_multiply(char **args, npy_intp *dimensions, npy_intp *steps, void
}
NPY_NO_EXPORT void
-TIMEDELTA_md_m_multiply(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+TIMEDELTA_md_m_multiply(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
const npy_timedelta in1 = *(npy_timedelta *)ip1;
@@ -1452,7 +1336,7 @@ TIMEDELTA_md_m_multiply(char **args, npy_intp *dimensions, npy_intp *steps, void
}
NPY_NO_EXPORT void
-TIMEDELTA_dm_m_multiply(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+TIMEDELTA_dm_m_multiply(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
const double in1 = *(double *)ip1;
@@ -1474,7 +1358,7 @@ TIMEDELTA_dm_m_multiply(char **args, npy_intp *dimensions, npy_intp *steps, void
/* Note: Assuming 'q' == NPY_LONGLONG */
NPY_NO_EXPORT void
-TIMEDELTA_mq_m_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+TIMEDELTA_mq_m_divide(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
const npy_timedelta in1 = *(npy_timedelta *)ip1;
@@ -1489,7 +1373,7 @@ TIMEDELTA_mq_m_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *
}
NPY_NO_EXPORT void
-TIMEDELTA_md_m_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+TIMEDELTA_md_m_divide(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
const npy_timedelta in1 = *(npy_timedelta *)ip1;
@@ -1510,7 +1394,7 @@ TIMEDELTA_md_m_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *
}
NPY_NO_EXPORT void
-TIMEDELTA_mm_d_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+TIMEDELTA_mm_d_divide(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
const npy_timedelta in1 = *(npy_timedelta *)ip1;
@@ -1525,7 +1409,7 @@ TIMEDELTA_mm_d_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *
}
NPY_NO_EXPORT void
-TIMEDELTA_mm_m_remainder(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+TIMEDELTA_mm_m_remainder(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
const npy_timedelta in1 = *(npy_timedelta *)ip1;
@@ -1553,7 +1437,7 @@ TIMEDELTA_mm_m_remainder(char **args, npy_intp *dimensions, npy_intp *steps, voi
}
NPY_NO_EXPORT void
-TIMEDELTA_mm_q_floor_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+TIMEDELTA_mm_q_floor_divide(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
const npy_timedelta in1 = *(npy_timedelta *)ip1;
@@ -1578,7 +1462,7 @@ TIMEDELTA_mm_q_floor_divide(char **args, npy_intp *dimensions, npy_intp *steps,
}
NPY_NO_EXPORT void
-TIMEDELTA_mm_qm_divmod(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+TIMEDELTA_mm_qm_divmod(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP_TWO_OUT {
const npy_timedelta in1 = *(npy_timedelta *)ip1;
@@ -1622,7 +1506,7 @@ TIMEDELTA_mm_qm_divmod(char **args, npy_intp *dimensions, npy_intp *steps, void
*/
NPY_NO_EXPORT void
-@TYPE@_sqrt(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_sqrt(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
if (!run_unary_simd_sqrt_@TYPE@(args, dimensions, steps)) {
UNARY_LOOP {
@@ -1646,7 +1530,7 @@ NPY_NO_EXPORT void
*/
NPY_NO_EXPORT NPY_GCC_OPT_3 void
-@TYPE@_@func@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data))
+@TYPE@_@func@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data))
{
UNARY_LOOP {
const @type@ in1 = *(@type@ *)ip1;
@@ -1664,7 +1548,7 @@ NPY_NO_EXPORT NPY_GCC_OPT_3 void
*/
NPY_NO_EXPORT NPY_GCC_OPT_3 void
-FLOAT_@func@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data))
+FLOAT_@func@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data))
{
UNARY_LOOP {
const npy_float in1 = *(npy_float *)ip1;
@@ -1687,7 +1571,7 @@ FLOAT_@func@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSE
*/
NPY_NO_EXPORT NPY_GCC_OPT_3 void
-@TYPE@_sqrt_@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data))
+@TYPE@_sqrt_@isa@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data))
{
if (!run_unary_@isa@_sqrt_@TYPE@(args, dimensions, steps)) {
UNARY_LOOP {
@@ -1698,7 +1582,7 @@ NPY_NO_EXPORT NPY_GCC_OPT_3 void
}
NPY_NO_EXPORT NPY_GCC_OPT_3 void
-@TYPE@_absolute_@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data))
+@TYPE@_absolute_@isa@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data))
{
if (!run_unary_@isa@_absolute_@TYPE@(args, dimensions, steps)) {
UNARY_LOOP {
@@ -1712,7 +1596,7 @@ NPY_NO_EXPORT NPY_GCC_OPT_3 void
}
NPY_NO_EXPORT NPY_GCC_OPT_3 void
-@TYPE@_square_@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data))
+@TYPE@_square_@isa@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data))
{
if (!run_unary_@isa@_square_@TYPE@(args, dimensions, steps)) {
UNARY_LOOP {
@@ -1723,7 +1607,7 @@ NPY_NO_EXPORT NPY_GCC_OPT_3 void
}
NPY_NO_EXPORT NPY_GCC_OPT_3 void
-@TYPE@_reciprocal_@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data))
+@TYPE@_reciprocal_@isa@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data))
{
if (!run_unary_@isa@_reciprocal_@TYPE@(args, dimensions, steps)) {
UNARY_LOOP {
@@ -1739,7 +1623,7 @@ NPY_NO_EXPORT NPY_GCC_OPT_3 void
*/
NPY_NO_EXPORT NPY_GCC_OPT_3 void
-@TYPE@_@func@_@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data))
+@TYPE@_@func@_@isa@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data))
{
if (!run_unary_@isa@_@func@_@TYPE@(args, dimensions, steps)) {
UNARY_LOOP {
@@ -1758,7 +1642,7 @@ NPY_NO_EXPORT NPY_GCC_OPT_3 void
*/
NPY_NO_EXPORT NPY_GCC_OPT_3 void
-FLOAT_@func@_@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data))
+FLOAT_@func@_@isa@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data))
{
if (!run_unary_@isa@_@func@_FLOAT(args, dimensions, steps)) {
UNARY_LOOP {
@@ -1787,7 +1671,7 @@ FLOAT_@func@_@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY
*/
NPY_NO_EXPORT NPY_GCC_OPT_3 void
-FLOAT_@func@_@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data))
+FLOAT_@func@_@isa@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data))
{
if (!run_unary_@isa@_sincos_FLOAT(args, dimensions, steps, @enum@)) {
UNARY_LOOP {
@@ -1900,7 +1784,7 @@ pairwise_sum_@TYPE@(char *a, npy_intp n, npy_intp stride)
* # PW = 1, 0, 0, 0#
*/
NPY_NO_EXPORT void
-@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
if (IS_BINARY_REDUCE) {
#if @PW@
@@ -1931,7 +1815,7 @@ NPY_NO_EXPORT void
* #OP = ==, !=, <, <=, >, >=, &&, ||#
*/
NPY_NO_EXPORT void
-@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
if (!run_binary_simd_@kind@_@TYPE@(args, dimensions, steps)) {
BINARY_LOOP {
@@ -1940,11 +1824,12 @@ NPY_NO_EXPORT void
*((npy_bool *)op1) = in1 @OP@ in2;
}
}
+ npy_clear_floatstatus_barrier((char*)dimensions);
}
/**end repeat1**/
NPY_NO_EXPORT void
-@TYPE@_logical_xor(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_logical_xor(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
const int t1 = !!*(@type@ *)ip1;
@@ -1954,7 +1839,7 @@ NPY_NO_EXPORT void
}
NPY_NO_EXPORT void
-@TYPE@_logical_not(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_logical_not(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
UNARY_LOOP {
const @type@ in1 = *(@type@ *)ip1;
@@ -1967,7 +1852,7 @@ NPY_NO_EXPORT void
* #func = npy_isnan, npy_isinf, npy_isfinite, npy_signbit#
**/
NPY_NO_EXPORT void
-@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
if (!run_@kind@_simd_@TYPE@(args, dimensions, steps)) {
UNARY_LOOP {
@@ -1980,7 +1865,7 @@ NPY_NO_EXPORT void
/**end repeat1**/
NPY_NO_EXPORT void
-@TYPE@_spacing(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_spacing(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
UNARY_LOOP {
const @type@ in1 = *(@type@ *)ip1;
@@ -1989,7 +1874,7 @@ NPY_NO_EXPORT void
}
NPY_NO_EXPORT void
-@TYPE@_copysign(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_copysign(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
const @type@ in1 = *(@type@ *)ip1;
@@ -1999,7 +1884,7 @@ NPY_NO_EXPORT void
}
NPY_NO_EXPORT void
-@TYPE@_nextafter(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_nextafter(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
const @type@ in1 = *(@type@ *)ip1;
@@ -2013,7 +1898,35 @@ NPY_NO_EXPORT void
* #OP = >=, <=#
**/
NPY_NO_EXPORT void
-@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_@kind@_avx512f(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
+{
+ /* */
+ if (IS_BINARY_REDUCE) {
+ if (!run_unary_reduce_simd_@kind@_@TYPE@(args, dimensions, steps)) {
+ BINARY_REDUCE_LOOP(@type@) {
+ const @type@ in2 = *(@type@ *)ip2;
+ /* Order of operations important for MSVC 2015 */
+ io1 = (io1 @OP@ in2 || npy_isnan(io1)) ? io1 : in2;
+ }
+ *((@type@ *)iop1) = io1;
+ }
+ }
+ else {
+ if (!run_binary_avx512f_@kind@_@TYPE@(args, dimensions, steps)) {
+ BINARY_LOOP {
+ @type@ in1 = *(@type@ *)ip1;
+ const @type@ in2 = *(@type@ *)ip2;
+ /* Order of operations important for MSVC 2015 */
+ in1 = (in1 @OP@ in2 || npy_isnan(in1)) ? in1 : in2;
+ *((@type@ *)op1) = in1;
+ }
+ }
+ }
+ npy_clear_floatstatus_barrier((char*)dimensions);
+}
+
+NPY_NO_EXPORT void
+@TYPE@_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
/* */
if (IS_BINARY_REDUCE) {
@@ -2044,7 +1957,7 @@ NPY_NO_EXPORT void
* #OP = >=, <=#
**/
NPY_NO_EXPORT void
-@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
/* */
if (IS_BINARY_REDUCE) {
@@ -2068,7 +1981,7 @@ NPY_NO_EXPORT void
/**end repeat1**/
NPY_NO_EXPORT void
-@TYPE@_floor_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_floor_divide(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
const @type@ in1 = *(@type@ *)ip1;
@@ -2079,7 +1992,7 @@ NPY_NO_EXPORT void
}
NPY_NO_EXPORT void
-@TYPE@_remainder(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_remainder(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
const @type@ in1 = *(@type@ *)ip1;
@@ -2089,7 +2002,7 @@ NPY_NO_EXPORT void
}
NPY_NO_EXPORT void
-@TYPE@_divmod(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_divmod(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP_TWO_OUT {
const @type@ in1 = *(@type@ *)ip1;
@@ -2099,7 +2012,7 @@ NPY_NO_EXPORT void
}
NPY_NO_EXPORT void
-@TYPE@_square(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data))
+@TYPE@_square(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data))
{
char * margs[] = {args[0], args[0], args[1]};
npy_intp msteps[] = {steps[0], steps[0], steps[1]};
@@ -2112,7 +2025,7 @@ NPY_NO_EXPORT void
}
NPY_NO_EXPORT void
-@TYPE@_reciprocal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data))
+@TYPE@_reciprocal(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data))
{
@type@ one = 1.@c@;
char * margs[] = {(char*)&one, args[0], args[1]};
@@ -2126,7 +2039,7 @@ NPY_NO_EXPORT void
}
NPY_NO_EXPORT void
-@TYPE@__ones_like(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data))
+@TYPE@__ones_like(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data))
{
OUTPUT_LOOP {
*((@type@ *)op1) = 1;
@@ -2134,7 +2047,7 @@ NPY_NO_EXPORT void
}
NPY_NO_EXPORT void
-@TYPE@_conjugate(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_conjugate(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
UNARY_LOOP {
const @type@ in1 = *(@type@ *)ip1;
@@ -2143,7 +2056,7 @@ NPY_NO_EXPORT void
}
NPY_NO_EXPORT void
-@TYPE@_absolute(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_absolute(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
if (!run_unary_simd_absolute_@TYPE@(args, dimensions, steps)) {
UNARY_LOOP {
@@ -2157,7 +2070,7 @@ NPY_NO_EXPORT void
}
NPY_NO_EXPORT void
-@TYPE@_negative(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_negative(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
if (!run_unary_simd_negative_@TYPE@(args, dimensions, steps)) {
UNARY_LOOP {
@@ -2168,7 +2081,7 @@ NPY_NO_EXPORT void
}
NPY_NO_EXPORT void
-@TYPE@_positive(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_positive(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
UNARY_LOOP {
const @type@ in1 = *(@type@ *)ip1;
@@ -2177,17 +2090,18 @@ NPY_NO_EXPORT void
}
NPY_NO_EXPORT void
-@TYPE@_sign(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_sign(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
/* Sign of nan is nan */
UNARY_LOOP {
const @type@ in1 = *(@type@ *)ip1;
*((@type@ *)op1) = in1 > 0 ? 1 : (in1 < 0 ? -1 : (in1 == 0 ? 0 : in1));
}
+ npy_clear_floatstatus_barrier((char*)dimensions);
}
NPY_NO_EXPORT void
-@TYPE@_modf(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_modf(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
UNARY_LOOP_TWO_OUT {
const @type@ in1 = *(@type@ *)ip1;
@@ -2196,7 +2110,7 @@ NPY_NO_EXPORT void
}
NPY_NO_EXPORT void
-@TYPE@_frexp(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_frexp(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
UNARY_LOOP_TWO_OUT {
const @type@ in1 = *(@type@ *)ip1;
@@ -2205,7 +2119,7 @@ NPY_NO_EXPORT void
}
NPY_NO_EXPORT void
-@TYPE@_ldexp(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_ldexp(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
const @type@ in1 = *(@type@ *)ip1;
@@ -2215,7 +2129,7 @@ NPY_NO_EXPORT void
}
NPY_NO_EXPORT void
-@TYPE@_ldexp_long(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_ldexp_long(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
/*
* Additional loop to handle npy_long integer inputs (cf. #866, #1633).
@@ -2262,7 +2176,7 @@ NPY_NO_EXPORT void
* # PW = 1, 0, 0, 0#
*/
NPY_NO_EXPORT void
-HALF_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+HALF_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
if (IS_BINARY_REDUCE) {
char *iop1 = args[0];
@@ -2297,7 +2211,7 @@ HALF_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED
* npy_half_ge, _HALF_LOGICAL_AND, _HALF_LOGICAL_OR#
*/
NPY_NO_EXPORT void
-HALF_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+HALF_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
const npy_half in1 = *(npy_half *)ip1;
@@ -2310,7 +2224,7 @@ HALF_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED
#undef _HALF_LOGICAL_OR
NPY_NO_EXPORT void
-HALF_logical_xor(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+HALF_logical_xor(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
const int in1 = !npy_half_iszero(*(npy_half *)ip1);
@@ -2320,7 +2234,7 @@ HALF_logical_xor(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_U
}
NPY_NO_EXPORT void
-HALF_logical_not(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+HALF_logical_not(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
UNARY_LOOP {
const npy_half in1 = *(npy_half *)ip1;
@@ -2333,7 +2247,7 @@ HALF_logical_not(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_U
* #func = npy_half_isnan, npy_half_isinf, npy_half_isfinite, npy_half_signbit#
**/
NPY_NO_EXPORT void
-HALF_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+HALF_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
UNARY_LOOP {
const npy_half in1 = *(npy_half *)ip1;
@@ -2344,7 +2258,7 @@ HALF_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED
/**end repeat**/
NPY_NO_EXPORT void
-HALF_spacing(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+HALF_spacing(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
UNARY_LOOP {
const npy_half in1 = *(npy_half *)ip1;
@@ -2353,7 +2267,7 @@ HALF_spacing(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSE
}
NPY_NO_EXPORT void
-HALF_copysign(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+HALF_copysign(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
const npy_half in1 = *(npy_half *)ip1;
@@ -2363,7 +2277,7 @@ HALF_copysign(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUS
}
NPY_NO_EXPORT void
-HALF_nextafter(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+HALF_nextafter(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
const npy_half in1 = *(npy_half *)ip1;
@@ -2377,7 +2291,7 @@ HALF_nextafter(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNU
* #OP = npy_half_ge, npy_half_le#
**/
NPY_NO_EXPORT void
-HALF_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+HALF_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
/* */
BINARY_LOOP {
@@ -2394,7 +2308,7 @@ HALF_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED
* #OP = npy_half_ge, npy_half_le#
**/
NPY_NO_EXPORT void
-HALF_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+HALF_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
/* */
BINARY_LOOP {
@@ -2407,7 +2321,7 @@ HALF_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED
/**end repeat**/
NPY_NO_EXPORT void
-HALF_floor_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+HALF_floor_divide(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
const npy_half in1 = *(npy_half *)ip1;
@@ -2418,7 +2332,7 @@ HALF_floor_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_
}
NPY_NO_EXPORT void
-HALF_remainder(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+HALF_remainder(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
const npy_half in1 = *(npy_half *)ip1;
@@ -2428,7 +2342,7 @@ HALF_remainder(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNU
}
NPY_NO_EXPORT void
-HALF_divmod(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+HALF_divmod(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP_TWO_OUT {
const npy_half in1 = *(npy_half *)ip1;
@@ -2438,7 +2352,7 @@ HALF_divmod(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED
}
NPY_NO_EXPORT void
-HALF_square(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data))
+HALF_square(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data))
{
UNARY_LOOP {
const float in1 = npy_half_to_float(*(npy_half *)ip1);
@@ -2447,7 +2361,7 @@ HALF_square(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED
}
NPY_NO_EXPORT void
-HALF_reciprocal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data))
+HALF_reciprocal(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data))
{
UNARY_LOOP {
const float in1 = npy_half_to_float(*(npy_half *)ip1);
@@ -2456,7 +2370,7 @@ HALF_reciprocal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UN
}
NPY_NO_EXPORT void
-HALF__ones_like(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data))
+HALF__ones_like(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data))
{
OUTPUT_LOOP {
*((npy_half *)op1) = NPY_HALF_ONE;
@@ -2464,7 +2378,7 @@ HALF__ones_like(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UN
}
NPY_NO_EXPORT void
-HALF_conjugate(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+HALF_conjugate(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
UNARY_LOOP {
const npy_half in1 = *(npy_half *)ip1;
@@ -2473,13 +2387,13 @@ HALF_conjugate(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNU
}
NPY_NO_EXPORT NPY_GCC_OPT_3 void
-HALF_absolute(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+HALF_absolute(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
UNARY_LOOP_FAST(npy_half, npy_half, *out = in&0x7fffu);
}
NPY_NO_EXPORT void
-HALF_negative(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+HALF_negative(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
UNARY_LOOP {
const npy_half in1 = *(npy_half *)ip1;
@@ -2488,7 +2402,7 @@ HALF_negative(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUS
}
NPY_NO_EXPORT void
-HALF_positive(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+HALF_positive(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
UNARY_LOOP {
const npy_half in1 = *(npy_half *)ip1;
@@ -2497,7 +2411,7 @@ HALF_positive(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUS
}
NPY_NO_EXPORT void
-HALF_sign(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+HALF_sign(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
/* Sign of nan is nan */
UNARY_LOOP {
@@ -2509,7 +2423,7 @@ HALF_sign(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(f
}
NPY_NO_EXPORT void
-HALF_modf(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+HALF_modf(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
float temp;
@@ -2521,7 +2435,7 @@ HALF_modf(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(f
}
NPY_NO_EXPORT void
-HALF_frexp(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+HALF_frexp(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
UNARY_LOOP_TWO_OUT {
const float in1 = npy_half_to_float(*(npy_half *)ip1);
@@ -2530,7 +2444,7 @@ HALF_frexp(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(
}
NPY_NO_EXPORT void
-HALF_ldexp(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+HALF_ldexp(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
const float in1 = npy_half_to_float(*(npy_half *)ip1);
@@ -2540,7 +2454,7 @@ HALF_ldexp(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(
}
NPY_NO_EXPORT void
-HALF_ldexp_long(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+HALF_ldexp_long(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
/*
* Additional loop to handle npy_long integer inputs (cf. #866, #1633).
@@ -2595,6 +2509,7 @@ HALF_ldexp_long(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UN
* #ftype = npy_float, npy_double, npy_longdouble#
* #c = f, , l#
* #C = F, , L#
+ * #SIMD = 1, 1, 0#
*/
/* similar to pairwise sum of real floats */
@@ -2670,6 +2585,7 @@ pairwise_sum_@TYPE@(@ftype@ *rr, @ftype@ * ri, char * a, npy_intp n,
}
}
+
/**begin repeat1
* arithmetic
* #kind = add, subtract#
@@ -2677,7 +2593,7 @@ pairwise_sum_@TYPE@(@ftype@ *rr, @ftype@ * ri, char * a, npy_intp n,
* #PW = 1, 0#
*/
NPY_NO_EXPORT void
-@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
if (IS_BINARY_REDUCE && @PW@) {
npy_intp n = dimensions[0];
@@ -2704,7 +2620,7 @@ NPY_NO_EXPORT void
/**end repeat1**/
NPY_NO_EXPORT void
-@TYPE@_multiply(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_multiply(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
const @ftype@ in1r = ((@ftype@ *)ip1)[0];
@@ -2717,7 +2633,7 @@ NPY_NO_EXPORT void
}
NPY_NO_EXPORT void
-@TYPE@_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_divide(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
const @ftype@ in1r = ((@ftype@ *)ip1)[0];
@@ -2748,8 +2664,34 @@ NPY_NO_EXPORT void
}
}
+#if @SIMD@
NPY_NO_EXPORT void
-@TYPE@_floor_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_add_avx512f(char **args, const npy_intp *dimensions, const npy_intp *steps, void *func)
+{
+ if (IS_BINARY_REDUCE) {
+ @TYPE@_add(args, dimensions, steps, func);
+ }
+ else if (!run_binary_avx512f_add_@TYPE@(args, dimensions, steps)) {
+ @TYPE@_add(args, dimensions, steps, func);
+ }
+}
+
+/**begin repeat1
+ * arithmetic
+ * #kind = subtract, multiply#
+ */
+NPY_NO_EXPORT void
+@TYPE@_@kind@_avx512f(char **args, const npy_intp *dimensions, const npy_intp *steps, void *func)
+{
+ if (!run_binary_avx512f_@kind@_@TYPE@(args, dimensions, steps)) {
+ @TYPE@_@kind@(args, dimensions, steps, func);
+ }
+}
+/**end repeat1**/
+#endif
+
+NPY_NO_EXPORT void
+@TYPE@_floor_divide(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
const @ftype@ in1r = ((@ftype@ *)ip1)[0];
@@ -2774,7 +2716,7 @@ NPY_NO_EXPORT void
* #OP = CGT, CGE, CLT, CLE, CEQ, CNE#
*/
NPY_NO_EXPORT void
-@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
const @ftype@ in1r = ((@ftype@ *)ip1)[0];
@@ -2792,7 +2734,7 @@ NPY_NO_EXPORT void
#OP2 = &&, ||#
*/
NPY_NO_EXPORT void
-@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
const @ftype@ in1r = ((@ftype@ *)ip1)[0];
@@ -2805,7 +2747,7 @@ NPY_NO_EXPORT void
/**end repeat1**/
NPY_NO_EXPORT void
-@TYPE@_logical_xor(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_logical_xor(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
const @ftype@ in1r = ((@ftype@ *)ip1)[0];
@@ -2819,7 +2761,7 @@ NPY_NO_EXPORT void
}
NPY_NO_EXPORT void
-@TYPE@_logical_not(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_logical_not(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
UNARY_LOOP {
const @ftype@ in1r = ((@ftype@ *)ip1)[0];
@@ -2834,7 +2776,7 @@ NPY_NO_EXPORT void
* #OP = ||, ||, &&#
**/
NPY_NO_EXPORT void
-@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
UNARY_LOOP {
const @ftype@ in1r = ((@ftype@ *)ip1)[0];
@@ -2846,7 +2788,7 @@ NPY_NO_EXPORT void
/**end repeat1**/
NPY_NO_EXPORT void
-@TYPE@_square(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data))
+@TYPE@_square(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data))
{
UNARY_LOOP {
const @ftype@ in1r = ((@ftype@ *)ip1)[0];
@@ -2857,7 +2799,7 @@ NPY_NO_EXPORT void
}
NPY_NO_EXPORT void
-@TYPE@_reciprocal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data))
+@TYPE@_reciprocal(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data))
{
UNARY_LOOP {
const @ftype@ in1r = ((@ftype@ *)ip1)[0];
@@ -2877,7 +2819,7 @@ NPY_NO_EXPORT void
}
NPY_NO_EXPORT void
-@TYPE@__ones_like(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data))
+@TYPE@__ones_like(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data))
{
OUTPUT_LOOP {
((@ftype@ *)op1)[0] = 1;
@@ -2886,7 +2828,7 @@ NPY_NO_EXPORT void
}
NPY_NO_EXPORT void
-@TYPE@_conjugate(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func)) {
+@TYPE@_conjugate(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) {
UNARY_LOOP {
const @ftype@ in1r = ((@ftype@ *)ip1)[0];
const @ftype@ in1i = ((@ftype@ *)ip1)[1];
@@ -2896,7 +2838,7 @@ NPY_NO_EXPORT void
}
NPY_NO_EXPORT void
-@TYPE@_absolute(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_absolute(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
UNARY_LOOP {
const @ftype@ in1r = ((@ftype@ *)ip1)[0];
@@ -2905,8 +2847,23 @@ NPY_NO_EXPORT void
}
}
+#if @SIMD@
+/**begin repeat1
+ * arithmetic
+ * #kind = conjugate, square, absolute#
+ */
+NPY_NO_EXPORT void
+@TYPE@_@kind@_avx512f(char **args, const npy_intp *dimensions, const npy_intp *steps, void *func)
+{
+ if (!run_unary_avx512f_@kind@_@TYPE@(args, dimensions, steps)) {
+ @TYPE@_@kind@(args, dimensions, steps, func);
+ }
+}
+/**end repeat1**/
+#endif
+
NPY_NO_EXPORT void
-@TYPE@__arg(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@__arg(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
UNARY_LOOP {
const @ftype@ in1r = ((@ftype@ *)ip1)[0];
@@ -2916,7 +2873,7 @@ NPY_NO_EXPORT void
}
NPY_NO_EXPORT void
-@TYPE@_sign(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_sign(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
/* fixme: sign of nan is currently 0 */
UNARY_LOOP {
@@ -2934,7 +2891,7 @@ NPY_NO_EXPORT void
* #OP = CGE, CLE#
*/
NPY_NO_EXPORT void
-@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
@ftype@ in1r = ((@ftype@ *)ip1)[0];
@@ -2957,7 +2914,7 @@ NPY_NO_EXPORT void
* #OP = CGE, CLE#
*/
NPY_NO_EXPORT void
-@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
const @ftype@ in1r = ((@ftype@ *)ip1)[0];
@@ -3005,7 +2962,7 @@ NPY_NO_EXPORT void
* #as_bool = 1, 0#
*/
NPY_NO_EXPORT void
-OBJECT@suffix@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func)) {
+OBJECT@suffix@_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) {
BINARY_LOOP {
PyObject *ret_obj;
PyObject *in1 = *(PyObject **)ip1;
@@ -3041,7 +2998,7 @@ OBJECT@suffix@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *
/**end repeat**/
NPY_NO_EXPORT void
-OBJECT_sign(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+OBJECT_sign(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
PyObject *zero = PyLong_FromLong(0);
diff --git a/numpy/core/src/umath/loops.h.src b/numpy/core/src/umath/loops.h.src
index e98a1ac3c..e9d0b4c62 100644
--- a/numpy/core/src/umath/loops.h.src
+++ b/numpy/core/src/umath/loops.h.src
@@ -7,14 +7,12 @@
#define _NPY_UMATH_LOOPS_H_
#define BOOL_invert BOOL_logical_not
-#define BOOL_negative BOOL_logical_not
#define BOOL_add BOOL_logical_or
#define BOOL_bitwise_and BOOL_logical_and
#define BOOL_bitwise_or BOOL_logical_or
#define BOOL_logical_xor BOOL_not_equal
#define BOOL_bitwise_xor BOOL_logical_xor
#define BOOL_multiply BOOL_logical_and
-#define BOOL_subtract BOOL_logical_xor
#define BOOL_maximum BOOL_logical_or
#define BOOL_minimum BOOL_logical_and
#define BOOL_fmax BOOL_maximum
@@ -32,17 +30,17 @@
* logical_and, logical_or, absolute, logical_not#
**/
NPY_NO_EXPORT void
-BOOL_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+BOOL_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
/**end repeat**/
NPY_NO_EXPORT void
-BOOL__ones_like(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data));
+BOOL__ones_like(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data));
/**begin repeat
* #kind = isnan, isinf, isfinite#
**/
NPY_NO_EXPORT void
-BOOL_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+BOOL_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
/**end repeat**/
/*
@@ -66,32 +64,32 @@ BOOL_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED
#define @S@@TYPE@_fmin @S@@TYPE@_minimum
NPY_NO_EXPORT void
-@S@@TYPE@__ones_like(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data));
+@S@@TYPE@__ones_like(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data));
NPY_NO_EXPORT void
-@S@@TYPE@_positive(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+@S@@TYPE@_positive(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
/**begin repeat2
* #isa = , _avx2#
*/
NPY_NO_EXPORT void
-@S@@TYPE@_square@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data));
+@S@@TYPE@_square@isa@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data));
NPY_NO_EXPORT void
-@S@@TYPE@_reciprocal@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data));
+@S@@TYPE@_reciprocal@isa@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data));
NPY_NO_EXPORT void
-@S@@TYPE@_conjugate@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+@S@@TYPE@_conjugate@isa@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
NPY_NO_EXPORT void
-@S@@TYPE@_negative@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+@S@@TYPE@_negative@isa@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
NPY_NO_EXPORT void
-@S@@TYPE@_logical_not@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+@S@@TYPE@_logical_not@isa@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
NPY_NO_EXPORT void
-@S@@TYPE@_invert@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+@S@@TYPE@_invert@isa@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
/**begin repeat3
* Arithmetic
@@ -100,7 +98,7 @@ NPY_NO_EXPORT void
* #OP = +, -,*, &, |, ^, <<, >>#
*/
NPY_NO_EXPORT void
-@S@@TYPE@_@kind@@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+@S@@TYPE@_@kind@@isa@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
/**end repeat3**/
@@ -110,12 +108,12 @@ NPY_NO_EXPORT void
* #OP = ==, !=, >, >=, <, <=, &&, ||#
*/
NPY_NO_EXPORT void
-@S@@TYPE@_@kind@@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+@S@@TYPE@_@kind@@isa@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
/**end repeat3**/
NPY_NO_EXPORT void
-@S@@TYPE@_logical_xor@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+@S@@TYPE@_logical_xor@isa@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
/**end repeat2**/
/**begin repeat2
@@ -123,41 +121,41 @@ NPY_NO_EXPORT void
* #OP = >, <#
**/
NPY_NO_EXPORT void
-@S@@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+@S@@TYPE@_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
/**end repeat2**/
NPY_NO_EXPORT void
-@S@@TYPE@_power(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+@S@@TYPE@_power(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
NPY_NO_EXPORT void
-@S@@TYPE@_fmod(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+@S@@TYPE@_fmod(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
NPY_NO_EXPORT void
-@S@@TYPE@_absolute(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+@S@@TYPE@_absolute(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
NPY_NO_EXPORT void
-@S@@TYPE@_sign(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+@S@@TYPE@_sign(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
NPY_NO_EXPORT void
-@S@@TYPE@_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+@S@@TYPE@_divide(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
NPY_NO_EXPORT void
-@S@@TYPE@_remainder(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+@S@@TYPE@_remainder(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
NPY_NO_EXPORT void
-@S@@TYPE@_divmod(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+@S@@TYPE@_divmod(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
NPY_NO_EXPORT void
-@S@@TYPE@_gcd(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+@S@@TYPE@_gcd(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
NPY_NO_EXPORT void
-@S@@TYPE@_lcm(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+@S@@TYPE@_lcm(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
/**begin repeat2
* #kind = isnan, isinf, isfinite#
**/
NPY_NO_EXPORT void
-@S@@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+@S@@TYPE@_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
/**end repeat2**/
/**end repeat1**/
@@ -174,7 +172,15 @@ NPY_NO_EXPORT void
* #TYPE = FLOAT, DOUBLE#
*/
NPY_NO_EXPORT void
-@TYPE@_sqrt(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+@TYPE@_sqrt(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
+
+/**begin repeat1
+ * #func = maximum, minimum#
+ */
+NPY_NO_EXPORT void
+@TYPE@_@func@_avx512f(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
+
+/**end repeat1**/
/**begin repeat1
* #isa = avx512f, fma#
@@ -184,7 +190,7 @@ NPY_NO_EXPORT void
* #func = sqrt, absolute, square, reciprocal#
*/
NPY_NO_EXPORT void
-@TYPE@_@func@_@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+@TYPE@_@func@_@isa@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
/**end repeat2**/
/**end repeat1**/
@@ -194,14 +200,14 @@ NPY_NO_EXPORT void
* #func = sin, cos, exp, log#
*/
NPY_NO_EXPORT void
-FLOAT_@func@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+FLOAT_@func@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
/**begin repeat1
* #isa = avx512f, fma#
*/
NPY_NO_EXPORT void
-FLOAT_@func@_@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+FLOAT_@func@_@isa@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
/**end repeat1**/
/**end repeat**/
@@ -215,13 +221,13 @@ FLOAT_@func@_@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY
*/
NPY_NO_EXPORT NPY_GCC_OPT_3 void
-@TYPE@_@func@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data));
+@TYPE@_@func@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data));
/**begin repeat2
* #isa = avx512f, fma#
*/
NPY_NO_EXPORT NPY_GCC_OPT_3 void
-@TYPE@_@func@_@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data));
+@TYPE@_@func@_@isa@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data));
/**end repeat2**/
/**end repeat1**/
/**end repeat**/
@@ -240,7 +246,7 @@ NPY_NO_EXPORT NPY_GCC_OPT_3 void
* # OP = +, -, *, /#
*/
NPY_NO_EXPORT void
-@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+@TYPE@_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
/**end repeat1**/
/**begin repeat1
@@ -249,21 +255,21 @@ NPY_NO_EXPORT void
* #OP = ==, !=, <, <=, >, >=, &&, ||#
*/
NPY_NO_EXPORT void
-@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+@TYPE@_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
/**end repeat1**/
NPY_NO_EXPORT void
-@TYPE@_logical_xor(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+@TYPE@_logical_xor(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
NPY_NO_EXPORT void
-@TYPE@_logical_not(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+@TYPE@_logical_not(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
/**begin repeat1
* #kind = isnan, isinf, isfinite, signbit, copysign, nextafter, spacing#
* #func = npy_isnan, npy_isinf, npy_isfinite, npy_signbit, npy_copysign, nextafter, spacing#
**/
NPY_NO_EXPORT void
-@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+@TYPE@_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
/**end repeat1**/
/**begin repeat1
@@ -271,7 +277,7 @@ NPY_NO_EXPORT void
* #OP = >=, <=#
**/
NPY_NO_EXPORT void
-@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+@TYPE@_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
/**end repeat1**/
/**begin repeat1
@@ -279,54 +285,53 @@ NPY_NO_EXPORT void
* #OP = >=, <=#
**/
NPY_NO_EXPORT void
-@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+@TYPE@_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
/**end repeat1**/
NPY_NO_EXPORT void
-@TYPE@_floor_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+@TYPE@_floor_divide(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
NPY_NO_EXPORT void
-@TYPE@_remainder(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+@TYPE@_remainder(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
NPY_NO_EXPORT void
-@TYPE@_divmod(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+@TYPE@_divmod(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
NPY_NO_EXPORT void
-@TYPE@_square(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data));
+@TYPE@_square(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data));
NPY_NO_EXPORT void
-@TYPE@_reciprocal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data));
-
+@TYPE@_reciprocal(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data));
NPY_NO_EXPORT void
-@TYPE@__ones_like(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data));
+@TYPE@__ones_like(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data));
NPY_NO_EXPORT void
-@TYPE@_conjugate(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+@TYPE@_conjugate(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
NPY_NO_EXPORT void
-@TYPE@_absolute(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+@TYPE@_absolute(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
NPY_NO_EXPORT void
-@TYPE@_negative(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+@TYPE@_negative(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
NPY_NO_EXPORT void
-@TYPE@_positive(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+@TYPE@_positive(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
NPY_NO_EXPORT void
-@TYPE@_sign(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-
+@TYPE@_sign(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
NPY_NO_EXPORT void
-@TYPE@_modf(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+@TYPE@_modf(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
NPY_NO_EXPORT void
-@TYPE@_frexp(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+@TYPE@_frexp(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
NPY_NO_EXPORT void
-@TYPE@_ldexp(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+@TYPE@_ldexp(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
+
NPY_NO_EXPORT void
-@TYPE@_ldexp_long(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+@TYPE@_ldexp_long(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
#define @TYPE@_true_divide @TYPE@_divide
@@ -351,33 +356,40 @@ NPY_NO_EXPORT void
* #TYPE = FLOAT, DOUBLE, LONGDOUBLE#
* #c = f, , l#
* #C = F, , L#
+ * #IFSIMD = 1, 1, 0#
*/
/**begin repeat1
+ * #isa = , _avx512f#
+ */
+
+/**begin repeat2
* arithmetic
* #kind = add, subtract#
* #OP = +, -#
*/
+
NPY_NO_EXPORT void
-C@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+C@TYPE@_@kind@@isa@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
-/**end repeat1**/
+/**end repeat2**/
NPY_NO_EXPORT void
-C@TYPE@_multiply(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+C@TYPE@_multiply@isa@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
+/**end repeat1**/
NPY_NO_EXPORT void
-C@TYPE@_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+C@TYPE@_divide(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
NPY_NO_EXPORT void
-C@TYPE@_floor_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+C@TYPE@_floor_divide(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
/**begin repeat1
* #kind= greater, greater_equal, less, less_equal, equal, not_equal#
* #OP = CGT, CGE, CLT, CLE, CEQ, CNE#
*/
NPY_NO_EXPORT void
-C@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+C@TYPE@_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
/**end repeat1**/
/**begin repeat1
@@ -386,50 +398,55 @@ C@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNU
#OP2 = &&, ||#
*/
NPY_NO_EXPORT void
-C@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+C@TYPE@_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
/**end repeat1**/
NPY_NO_EXPORT void
-C@TYPE@_logical_xor(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+C@TYPE@_logical_xor(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
NPY_NO_EXPORT void
-C@TYPE@_logical_not(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+C@TYPE@_logical_not(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
/**begin repeat1
* #kind = isnan, isinf, isfinite#
* #func = npy_isnan, npy_isinf, npy_isfinite#
* #OP = ||, ||, &&#
**/
NPY_NO_EXPORT void
-C@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+C@TYPE@_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
/**end repeat1**/
NPY_NO_EXPORT void
-C@TYPE@_square(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data));
+C@TYPE@_reciprocal(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data));
NPY_NO_EXPORT void
-C@TYPE@_reciprocal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data));
+C@TYPE@__ones_like(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data));
+
+/**begin repeat1
+ * #isa = , _avx512f#
+ */
NPY_NO_EXPORT void
-C@TYPE@__ones_like(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data));
+C@TYPE@_conjugate@isa@(char **args, const npy_intp *dimensions, const npy_intp *steps, void *NPY_UNUSED(func));
NPY_NO_EXPORT void
-C@TYPE@_conjugate(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+C@TYPE@_absolute@isa@(char **args, const npy_intp *dimensions, const npy_intp *steps, void *NPY_UNUSED(func));
NPY_NO_EXPORT void
-C@TYPE@_absolute(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+C@TYPE@_square@isa@(char **args, const npy_intp *dimensions, const npy_intp *steps, void *NPY_UNUSED(data));
+/**end repeat1**/
NPY_NO_EXPORT void
-C@TYPE@__arg(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+C@TYPE@__arg(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
NPY_NO_EXPORT void
-C@TYPE@_sign(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+C@TYPE@_sign(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
/**begin repeat1
* #kind = maximum, minimum#
* #OP = CGE, CLE#
*/
NPY_NO_EXPORT void
-C@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+C@TYPE@_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
/**end repeat1**/
/**begin repeat1
@@ -437,9 +454,8 @@ C@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNU
* #OP = CGE, CLE#
*/
NPY_NO_EXPORT void
-C@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+C@TYPE@_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
/**end repeat1**/
-
#define C@TYPE@_true_divide C@TYPE@_divide
/**end repeat**/
@@ -458,95 +474,99 @@ C@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNU
*/
NPY_NO_EXPORT void
-TIMEDELTA_negative(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+TIMEDELTA_negative(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
NPY_NO_EXPORT void
-TIMEDELTA_positive(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+TIMEDELTA_positive(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
NPY_NO_EXPORT void
-TIMEDELTA_absolute(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+TIMEDELTA_absolute(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
NPY_NO_EXPORT void
-TIMEDELTA_sign(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+TIMEDELTA_sign(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
/**begin repeat
* #TYPE = DATETIME, TIMEDELTA#
*/
NPY_NO_EXPORT void
-@TYPE@_isnat(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+@TYPE@_isnat(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
+
+NPY_NO_EXPORT void
+@TYPE@_isfinite(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
NPY_NO_EXPORT void
-@TYPE@_isfinite(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+@TYPE@_isinf(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
+
+#define @TYPE@_isnan @TYPE@_isnat
NPY_NO_EXPORT void
-@TYPE@__ones_like(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data));
+@TYPE@__ones_like(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data));
/**begin repeat1
* #kind = equal, not_equal, greater, greater_equal, less, less_equal#
* #OP = ==, !=, >, >=, <, <=#
*/
NPY_NO_EXPORT void
-@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+@TYPE@_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
/**end repeat1**/
/**begin repeat1
- * #kind = maximum, minimum#
- * #OP = >, <#
+ * #kind = maximum, minimum, fmin, fmax#
**/
NPY_NO_EXPORT void
-@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+@TYPE@_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
/**end repeat1**/
/**end repeat**/
NPY_NO_EXPORT void
-DATETIME_Mm_M_add(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data));
+DATETIME_Mm_M_add(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data));
NPY_NO_EXPORT void
-DATETIME_mM_M_add(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+DATETIME_mM_M_add(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
NPY_NO_EXPORT void
-TIMEDELTA_mm_m_add(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+TIMEDELTA_mm_m_add(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
NPY_NO_EXPORT void
-DATETIME_Mm_M_subtract(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+DATETIME_Mm_M_subtract(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
NPY_NO_EXPORT void
-DATETIME_MM_m_subtract(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+DATETIME_MM_m_subtract(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
NPY_NO_EXPORT void
-TIMEDELTA_mm_m_subtract(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+TIMEDELTA_mm_m_subtract(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
NPY_NO_EXPORT void
-TIMEDELTA_mq_m_multiply(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+TIMEDELTA_mq_m_multiply(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
NPY_NO_EXPORT void
-TIMEDELTA_qm_m_multiply(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+TIMEDELTA_qm_m_multiply(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
NPY_NO_EXPORT void
-TIMEDELTA_md_m_multiply(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+TIMEDELTA_md_m_multiply(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
NPY_NO_EXPORT void
-TIMEDELTA_dm_m_multiply(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+TIMEDELTA_dm_m_multiply(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
NPY_NO_EXPORT void
-TIMEDELTA_mq_m_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+TIMEDELTA_mq_m_divide(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
NPY_NO_EXPORT void
-TIMEDELTA_md_m_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+TIMEDELTA_md_m_divide(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
NPY_NO_EXPORT void
-TIMEDELTA_mm_d_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+TIMEDELTA_mm_d_divide(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
NPY_NO_EXPORT void
-TIMEDELTA_mm_q_floor_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+TIMEDELTA_mm_q_floor_divide(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
NPY_NO_EXPORT void
-TIMEDELTA_mm_m_remainder(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+TIMEDELTA_mm_m_remainder(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
NPY_NO_EXPORT void
-TIMEDELTA_mm_qm_divmod(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+TIMEDELTA_mm_qm_divmod(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
/* Special case equivalents to above functions */
@@ -556,10 +576,6 @@ TIMEDELTA_mm_qm_divmod(char **args, npy_intp *dimensions, npy_intp *steps, void
#define TIMEDELTA_mq_m_floor_divide TIMEDELTA_mq_m_divide
#define TIMEDELTA_md_m_floor_divide TIMEDELTA_md_m_divide
/* #define TIMEDELTA_mm_d_floor_divide TIMEDELTA_mm_d_divide */
-#define TIMEDELTA_fmin TIMEDELTA_minimum
-#define TIMEDELTA_fmax TIMEDELTA_maximum
-#define DATETIME_fmin DATETIME_minimum
-#define DATETIME_fmax DATETIME_maximum
/*
*****************************************************************************
@@ -575,15 +591,15 @@ TIMEDELTA_mm_qm_divmod(char **args, npy_intp *dimensions, npy_intp *steps, void
* #suffix = , _OO_O#
*/
NPY_NO_EXPORT void
-OBJECT@suffix@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+OBJECT@suffix@_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
/**end repeat1**/
/**end repeat**/
NPY_NO_EXPORT void
-OBJECT_sign(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+OBJECT_sign(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
NPY_NO_EXPORT void
-PyUFunc_OOO_O(char **args, npy_intp *dimensions, npy_intp *steps, void *func);
+PyUFunc_OOO_O(char **args, npy_intp const *dimensions, npy_intp const *steps, void *func);
/*
*****************************************************************************
diff --git a/numpy/core/src/umath/matmul.c.src b/numpy/core/src/umath/matmul.c.src
index b5204eca5..5cbb6e94d 100644
--- a/numpy/core/src/umath/matmul.c.src
+++ b/numpy/core/src/umath/matmul.c.src
@@ -31,7 +31,11 @@
* -1 to be conservative, in case blas internally uses a for loop with an
* inclusive upper bound
*/
+#ifndef HAVE_BLAS_ILP64
#define BLAS_MAXSIZE (NPY_MAX_INT - 1)
+#else
+#define BLAS_MAXSIZE (NPY_MAX_INT64 - 1)
+#endif
/*
* Determine if a 2d matrix can be used by BLAS
@@ -84,25 +88,25 @@ NPY_NO_EXPORT void
* op: data in c order, m shape
*/
enum CBLAS_ORDER order;
- int M, N, lda;
+ CBLAS_INT M, N, lda;
assert(m <= BLAS_MAXSIZE && n <= BLAS_MAXSIZE);
assert (is_blasable2d(is2_n, sizeof(@typ@), n, 1, sizeof(@typ@)));
- M = (int)m;
- N = (int)n;
+ M = (CBLAS_INT)m;
+ N = (CBLAS_INT)n;
if (is_blasable2d(is1_m, is1_n, m, n, sizeof(@typ@))) {
order = CblasColMajor;
- lda = (int)(is1_m / sizeof(@typ@));
+ lda = (CBLAS_INT)(is1_m / sizeof(@typ@));
}
else {
/* If not ColMajor, caller should have ensured we are RowMajor */
/* will not assert in release mode */
order = CblasRowMajor;
assert(is_blasable2d(is1_n, is1_m, n, m, sizeof(@typ@)));
- lda = (int)(is1_n / sizeof(@typ@));
+ lda = (CBLAS_INT)(is1_n / sizeof(@typ@));
}
- cblas_@prefix@gemv(order, CblasTrans, N, M, @step1@, ip1, lda, ip2,
+ CBLAS_FUNC(cblas_@prefix@gemv)(order, CblasTrans, N, M, @step1@, ip1, lda, ip2,
is2_n / sizeof(@typ@), @step0@, op, op_m / sizeof(@typ@));
}
@@ -117,37 +121,37 @@ NPY_NO_EXPORT void
*/
enum CBLAS_ORDER order = CblasRowMajor;
enum CBLAS_TRANSPOSE trans1, trans2;
- int M, N, P, lda, ldb, ldc;
+ CBLAS_INT M, N, P, lda, ldb, ldc;
assert(m <= BLAS_MAXSIZE && n <= BLAS_MAXSIZE && p <= BLAS_MAXSIZE);
- M = (int)m;
- N = (int)n;
- P = (int)p;
+ M = (CBLAS_INT)m;
+ N = (CBLAS_INT)n;
+ P = (CBLAS_INT)p;
assert(is_blasable2d(os_m, os_p, m, p, sizeof(@typ@)));
- ldc = (int)(os_m / sizeof(@typ@));
+ ldc = (CBLAS_INT)(os_m / sizeof(@typ@));
if (is_blasable2d(is1_m, is1_n, m, n, sizeof(@typ@))) {
trans1 = CblasNoTrans;
- lda = (int)(is1_m / sizeof(@typ@));
+ lda = (CBLAS_INT)(is1_m / sizeof(@typ@));
}
else {
/* If not ColMajor, caller should have ensured we are RowMajor */
/* will not assert in release mode */
assert(is_blasable2d(is1_n, is1_m, n, m, sizeof(@typ@)));
trans1 = CblasTrans;
- lda = (int)(is1_n / sizeof(@typ@));
+ lda = (CBLAS_INT)(is1_n / sizeof(@typ@));
}
if (is_blasable2d(is2_n, is2_p, n, p, sizeof(@typ@))) {
trans2 = CblasNoTrans;
- ldb = (int)(is2_n / sizeof(@typ@));
+ ldb = (CBLAS_INT)(is2_n / sizeof(@typ@));
}
else {
/* If not ColMajor, caller should have ensured we are RowMajor */
/* will not assert in release mode */
assert(is_blasable2d(is2_p, is2_n, p, n, sizeof(@typ@)));
trans2 = CblasTrans;
- ldb = (int)(is2_p / sizeof(@typ@));
+ ldb = (CBLAS_INT)(is2_p / sizeof(@typ@));
}
/*
* Use syrk if we have a case of a matrix times its transpose.
@@ -162,12 +166,14 @@ NPY_NO_EXPORT void
) {
npy_intp i,j;
if (trans1 == CblasNoTrans) {
- cblas_@prefix@syrk(order, CblasUpper, trans1, P, N, @step1@,
- ip1, lda, @step0@, op, ldc);
+ CBLAS_FUNC(cblas_@prefix@syrk)(
+ order, CblasUpper, trans1, P, N, @step1@,
+ ip1, lda, @step0@, op, ldc);
}
else {
- cblas_@prefix@syrk(order, CblasUpper, trans1, P, N, @step1@,
- ip1, ldb, @step0@, op, ldc);
+ CBLAS_FUNC(cblas_@prefix@syrk)(
+ order, CblasUpper, trans1, P, N, @step1@,
+ ip1, ldb, @step0@, op, ldc);
}
/* Copy the triangle */
for (i = 0; i < P; i++) {
@@ -178,8 +184,9 @@ NPY_NO_EXPORT void
}
else {
- cblas_@prefix@gemm(order, trans1, trans2, M, P, N, @step1@, ip1, lda,
- ip2, ldb, @step0@, op, ldc);
+ CBLAS_FUNC(cblas_@prefix@gemm)(
+ order, trans1, trans2, M, P, N, @step1@, ip1, lda,
+ ip2, ldb, @step0@, op, ldc);
}
}
@@ -388,7 +395,7 @@ OBJECT_matmul_inner_noblas(void *_ip1, npy_intp is1_m, npy_intp is1_n,
NPY_NO_EXPORT void
-@TYPE@_matmul(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+@TYPE@_matmul(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
npy_intp dOuter = *dimensions++;
npy_intp iOuter;
diff --git a/numpy/core/src/umath/matmul.h.src b/numpy/core/src/umath/matmul.h.src
index a664b1b4e..18940e2f2 100644
--- a/numpy/core/src/umath/matmul.h.src
+++ b/numpy/core/src/umath/matmul.h.src
@@ -6,7 +6,7 @@
* BOOL, OBJECT#
**/
NPY_NO_EXPORT void
-@TYPE@_matmul(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+@TYPE@_matmul(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func));
/**end repeat**/
diff --git a/numpy/core/src/umath/override.c b/numpy/core/src/umath/override.c
index 8d67f96ac..bf6e5a698 100644
--- a/numpy/core/src/umath/override.c
+++ b/numpy/core/src/umath/override.c
@@ -112,9 +112,16 @@ fail:
static int
normalize_signature_keyword(PyObject *normal_kwds)
{
- PyObject* obj = PyDict_GetItemString(normal_kwds, "sig");
+ PyObject *obj = _PyDict_GetItemStringWithError(normal_kwds, "sig");
+ if (obj == NULL && PyErr_Occurred()){
+ return -1;
+ }
if (obj != NULL) {
- if (PyDict_GetItemString(normal_kwds, "signature")) {
+ PyObject *sig = _PyDict_GetItemStringWithError(normal_kwds, "signature");
+ if (sig == NULL && PyErr_Occurred()) {
+ return -1;
+ }
+ if (sig) {
PyErr_SetString(PyExc_TypeError,
"cannot specify both 'sig' and 'signature'");
return -1;
@@ -165,11 +172,17 @@ normalize___call___args(PyUFuncObject *ufunc, PyObject *args,
/* If we have more args than nin, they must be the output variables.*/
if (nargs > nin) {
- if(nkwds > 0 && PyDict_GetItemString(*normal_kwds, "out")) {
- PyErr_Format(PyExc_TypeError,
- "argument given by name ('out') and position "
- "(%"NPY_INTP_FMT")", nin);
- return -1;
+ if (nkwds > 0) {
+ PyObject *out_kwd = _PyDict_GetItemStringWithError(*normal_kwds, "out");
+ if (out_kwd == NULL && PyErr_Occurred()) {
+ return -1;
+ }
+ else if (out_kwd) {
+ PyErr_Format(PyExc_TypeError,
+ "argument given by name ('out') and position "
+ "(%"NPY_INTP_FMT")", nin);
+ return -1;
+ }
}
for (i = nin; i < nargs; i++) {
not_all_none = (PyTuple_GET_ITEM(args, i) != Py_None);
@@ -204,11 +217,20 @@ normalize___call___args(PyUFuncObject *ufunc, PyObject *args,
}
}
/* gufuncs accept either 'axes' or 'axis', but not both */
- if (nkwds >= 2 && (PyDict_GetItemString(*normal_kwds, "axis") &&
- PyDict_GetItemString(*normal_kwds, "axes"))) {
- PyErr_SetString(PyExc_TypeError,
- "cannot specify both 'axis' and 'axes'");
- return -1;
+ if (nkwds >= 2) {
+ PyObject *axis_kwd = _PyDict_GetItemStringWithError(*normal_kwds, "axis");
+ if (axis_kwd == NULL && PyErr_Occurred()) {
+ return -1;
+ }
+ PyObject *axes_kwd = _PyDict_GetItemStringWithError(*normal_kwds, "axes");
+ if (axes_kwd == NULL && PyErr_Occurred()) {
+ return -1;
+ }
+ if (axis_kwd && axes_kwd) {
+ PyErr_SetString(PyExc_TypeError,
+ "cannot specify both 'axis' and 'axes'");
+ return -1;
+ }
}
/* finally, ufuncs accept 'sig' or 'signature' normalize to 'signature' */
return nkwds == 0 ? 0 : normalize_signature_keyword(*normal_kwds);
@@ -243,7 +265,11 @@ normalize_reduce_args(PyUFuncObject *ufunc, PyObject *args,
}
for (i = 1; i < nargs; i++) {
- if (PyDict_GetItemString(*normal_kwds, kwlist[i])) {
+ PyObject *kwd = _PyDict_GetItemStringWithError(*normal_kwds, kwlist[i]);
+ if (kwd == NULL && PyErr_Occurred()) {
+ return -1;
+ }
+ else if (kwd) {
PyErr_Format(PyExc_TypeError,
"argument given by name ('%s') and position "
"(%"NPY_INTP_FMT")", kwlist[i], i);
@@ -293,7 +319,11 @@ normalize_accumulate_args(PyUFuncObject *ufunc, PyObject *args,
}
for (i = 1; i < nargs; i++) {
- if (PyDict_GetItemString(*normal_kwds, kwlist[i])) {
+ PyObject *kwd = _PyDict_GetItemStringWithError(*normal_kwds, kwlist[i]);
+ if (kwd == NULL && PyErr_Occurred()) {
+ return -1;
+ }
+ else if (kwd) {
PyErr_Format(PyExc_TypeError,
"argument given by name ('%s') and position "
"(%"NPY_INTP_FMT")", kwlist[i], i);
@@ -341,7 +371,11 @@ normalize_reduceat_args(PyUFuncObject *ufunc, PyObject *args,
}
for (i = 2; i < nargs; i++) {
- if (PyDict_GetItemString(*normal_kwds, kwlist[i])) {
+ PyObject *kwd = _PyDict_GetItemStringWithError(*normal_kwds, kwlist[i]);
+ if (kwd == NULL && PyErr_Occurred()) {
+ return -1;
+ }
+ else if (kwd) {
PyErr_Format(PyExc_TypeError,
"argument given by name ('%s') and position "
"(%"NPY_INTP_FMT")", kwlist[i], i);
@@ -469,8 +503,11 @@ PyUFunc_CheckOverride(PyUFuncObject *ufunc, char *method,
/* ensure out is always a tuple */
normal_kwds = PyDict_Copy(kwds);
- out = PyDict_GetItemString(normal_kwds, "out");
- if (out != NULL) {
+ out = _PyDict_GetItemStringWithError(normal_kwds, "out");
+ if (out == NULL && PyErr_Occurred()) {
+ goto fail;
+ }
+ else if (out) {
int nout = ufunc->nout;
if (PyTuple_CheckExact(out)) {
@@ -494,32 +531,18 @@ PyUFunc_CheckOverride(PyUFuncObject *ufunc, char *method,
}
else {
/* not a tuple */
- if (nout > 1 && DEPRECATE("passing a single argument to the "
- "'out' keyword argument of a "
- "ufunc with\n"
- "more than one output will "
- "result in an error in the "
- "future") < 0) {
- /*
- * If the deprecation is removed, also remove the loop
- * below setting tuple items to None (but keep this future
- * error message.)
- */
+ if (nout > 1) {
PyErr_SetString(PyExc_TypeError,
"'out' must be a tuple of arguments");
goto fail;
}
if (out != Py_None) {
/* not already a tuple and not None */
- PyObject *out_tuple = PyTuple_New(nout);
+ PyObject *out_tuple = PyTuple_New(1);
if (out_tuple == NULL) {
goto fail;
}
- for (i = 1; i < nout; i++) {
- Py_INCREF(Py_None);
- PyTuple_SET_ITEM(out_tuple, i, Py_None);
- }
/* out was borrowed ref; make it permanent */
Py_INCREF(out);
/* steals reference */
diff --git a/numpy/core/src/umath/reduction.c b/numpy/core/src/umath/reduction.c
index 4ce8d8ab7..79c302755 100644
--- a/numpy/core/src/umath/reduction.c
+++ b/numpy/core/src/umath/reduction.c
@@ -84,10 +84,12 @@ allocate_reduce_result(PyArrayObject *arr, const npy_bool *axis_flags,
* The return value is a view into 'out'.
*/
static PyArrayObject *
-conform_reduce_result(int ndim, const npy_bool *axis_flags,
+conform_reduce_result(PyArrayObject *in, const npy_bool *axis_flags,
PyArrayObject *out, int keepdims, const char *funcname,
int need_copy)
{
+ int ndim = PyArray_NDIM(in);
+ npy_intp *shape_in = PyArray_DIMS(in);
npy_intp strides[NPY_MAXDIMS], shape[NPY_MAXDIMS];
npy_intp *strides_out = PyArray_STRIDES(out);
npy_intp *shape_out = PyArray_DIMS(out);
@@ -118,6 +120,16 @@ conform_reduce_result(int ndim, const npy_bool *axis_flags,
return NULL;
}
}
+ else {
+ if (shape_out[idim] != shape_in[idim]) {
+ PyErr_Format(PyExc_ValueError,
+ "output parameter for reduction operation %s "
+ "has a non-reduction dimension not equal to "
+ "the input one.", funcname);
+ return NULL;
+ }
+ }
+
}
Py_INCREF(out);
@@ -138,6 +150,13 @@ conform_reduce_result(int ndim, const npy_bool *axis_flags,
"does not have enough dimensions", funcname);
return NULL;
}
+ if (shape_out[idim_out] != shape_in[idim]) {
+ PyErr_Format(PyExc_ValueError,
+ "output parameter for reduction operation %s "
+ "has a non-reduction dimension not equal to "
+ "the input one.", funcname);
+ return NULL;
+ }
strides[idim] = strides_out[idim_out];
shape[idim] = shape_out[idim_out];
++idim_out;
@@ -240,7 +259,7 @@ PyArray_CreateReduceResult(PyArrayObject *operand, PyArrayObject *out,
/* Steal the dtype reference */
Py_XDECREF(dtype);
- result = conform_reduce_result(PyArray_NDIM(operand), axis_flags,
+ result = conform_reduce_result(operand, axis_flags,
out, keepdims, funcname, need_copy);
}
diff --git a/numpy/core/src/umath/reduction.h b/numpy/core/src/umath/reduction.h
index dfaeabcbb..0c2183ed6 100644
--- a/numpy/core/src/umath/reduction.h
+++ b/numpy/core/src/umath/reduction.h
@@ -100,8 +100,8 @@ typedef int (PyArray_AssignReduceIdentityFunc)(PyArrayObject *result,
*/
typedef int (PyArray_ReduceLoopFunc)(NpyIter *iter,
char **dataptr,
- npy_intp *strideptr,
- npy_intp *countptr,
+ npy_intp const *strideptr,
+ npy_intp const *countptr,
NpyIter_IterNextFunc *iternext,
int needs_api,
npy_intp skip_first_count,
diff --git a/numpy/core/src/umath/scalarmath.c.src b/numpy/core/src/umath/scalarmath.c.src
index d5d8d659b..bb2915e09 100644
--- a/numpy/core/src/umath/scalarmath.c.src
+++ b/numpy/core/src/umath/scalarmath.c.src
@@ -744,56 +744,50 @@ _@name@_convert2_to_ctypes(PyObject *a, @type@ *arg1,
/**end repeat**/
-#if defined(NPY_PY3K)
-#define CODEGEN_SKIP_divide_FLAG
-#endif
-
/**begin repeat
*
* #name = (byte, ubyte, short, ushort, int, uint,
- * long, ulong, longlong, ulonglong)*13,
+ * long, ulong, longlong, ulonglong)*12,
* (half, float, double, longdouble,
- * cfloat, cdouble, clongdouble)*6,
+ * cfloat, cdouble, clongdouble)*5,
* (half, float, double, longdouble)*2#
* #Name = (Byte, UByte, Short, UShort, Int, UInt,
- * Long, ULong,LongLong,ULongLong)*13,
+ * Long, ULong,LongLong,ULongLong)*12,
* (Half, Float, Double, LongDouble,
- * CFloat, CDouble, CLongDouble)*6,
+ * CFloat, CDouble, CLongDouble)*5,
* (Half, Float, Double, LongDouble)*2#
* #type = (npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint,
- * npy_long, npy_ulong, npy_longlong, npy_ulonglong)*13,
+ * npy_long, npy_ulong, npy_longlong, npy_ulonglong)*12,
* (npy_half, npy_float, npy_double, npy_longdouble,
- * npy_cfloat, npy_cdouble, npy_clongdouble)*6,
+ * npy_cfloat, npy_cdouble, npy_clongdouble)*5,
* (npy_half, npy_float, npy_double, npy_longdouble)*2#
*
- * #oper = add*10, subtract*10, multiply*10, divide*10, remainder*10,
+ * #oper = add*10, subtract*10, multiply*10, remainder*10,
* divmod*10, floor_divide*10, lshift*10, rshift*10, and*10,
* or*10, xor*10, true_divide*10,
- * add*7, subtract*7, multiply*7, divide*7, floor_divide*7, true_divide*7,
+ * add*7, subtract*7, multiply*7, floor_divide*7, true_divide*7,
* divmod*4, remainder*4#
*
- * #fperr = 1*70,0*50,1*10,
- * 1*42,
+ * #fperr = 1*60,0*50,1*10,
+ * 1*35,
* 1*8#
- * #twoout = 0*50,1*10,0*70,
- * 0*42,
+ * #twoout = 0*40,1*10,0*70,
+ * 0*35,
* 1*4,0*4#
* #otype = (npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint,
- * npy_long, npy_ulong, npy_longlong, npy_ulonglong)*12,
+ * npy_long, npy_ulong, npy_longlong, npy_ulonglong)*11,
* npy_float*4, npy_double*6,
* (npy_half, npy_float, npy_double, npy_longdouble,
- * npy_cfloat, npy_cdouble, npy_clongdouble)*6,
+ * npy_cfloat, npy_cdouble, npy_clongdouble)*5,
* (npy_half, npy_float, npy_double, npy_longdouble)*2#
* #OName = (Byte, UByte, Short, UShort, Int, UInt,
- * Long, ULong, LongLong, ULongLong)*12,
+ * Long, ULong, LongLong, ULongLong)*11,
* Float*4, Double*6,
* (Half, Float, Double, LongDouble,
- * CFloat, CDouble, CLongDouble)*6,
+ * CFloat, CDouble, CLongDouble)*5,
* (Half, Float, Double, LongDouble)*2#
*/
-#if !defined(CODEGEN_SKIP_@oper@_FLAG)
-
static PyObject *
@name@_@oper@(PyObject *a, PyObject *b)
{
@@ -904,12 +898,9 @@ static PyObject *
#endif
return ret;
}
-#endif
/**end repeat**/
-#undef CODEGEN_SKIP_divide_FLAG
-
#define _IS_ZERO(x) (x == 0)
/**begin repeat
@@ -1300,12 +1291,6 @@ static PyObject *
/**end repeat**/
-#if defined(NPY_PY3K)
-#define NONZERO_NAME(prefix) prefix##bool
-#else
-#define NONZERO_NAME(prefix) prefix##nonzero
-#endif
-
#define _IS_NONZERO(x) (x != 0)
/**begin repeat
*
@@ -1321,7 +1306,7 @@ static PyObject *
* #nonzero = _IS_NONZERO*10, !npy_half_iszero, _IS_NONZERO*6#
*/
static int
-NONZERO_NAME(@name@_)(PyObject *a)
+@name@_bool(PyObject *a)
{
int ret;
@type@ arg1;
@@ -1330,7 +1315,7 @@ NONZERO_NAME(@name@_)(PyObject *a)
if (PyErr_Occurred()) {
return -1;
}
- return PyGenericArrType_Type.tp_as_number->NONZERO_NAME(nb_)(a);
+ return PyGenericArrType_Type.tp_as_number->nb_bool(a);
}
/*
@@ -1410,15 +1395,6 @@ static PyObject *
return NULL;
}
-#ifndef NPY_PY3K
- /* Invoke long.__int__ to try to downcast */
- {
- PyObject *before_downcast = long_result;
- long_result = Py_TYPE(long_result)->tp_as_number->nb_int(long_result);
- Py_DECREF(before_downcast);
- }
-#endif
-
return long_result;
}
/**end repeat**/
@@ -1451,63 +1427,6 @@ static NPY_INLINE PyObject *
}
/**end repeat**/
-
-#if !defined(NPY_PY3K)
-
-/**begin repeat
- *
- * #name = (byte, ubyte, short, ushort, int, uint,
- * long, ulong, longlong, ulonglong,
- * half, float, double, longdouble,
- * cfloat, cdouble, clongdouble)#
- * #Name = (Byte, UByte, Short, UShort, Int, UInt,
- * Long, ULong, LongLong, ULongLong,
- * Half, Float, Double, LongDouble,
- * CFloat, CDouble, CLongDouble)#
- * #cmplx = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1)#
- * #to_ctype = (, , , , , , , , , , npy_half_to_double, , , , , , )#
- * #func = (PyLong_FromLongLong, PyLong_FromUnsignedLongLong)*5,
- * PyLong_FromDouble*3, npy_longdouble_to_PyLong,
- * PyLong_FromDouble*2, npy_longdouble_to_PyLong#
- */
-static NPY_INLINE PyObject *
-@name@_long(PyObject *obj)
-{
-#if @cmplx@
- if (emit_complexwarning() < 0) {
- return NULL;
- }
- return @func@(@to_ctype@(PyArrayScalar_VAL(obj, @Name@).real));
-#else
- return @func@(@to_ctype@(PyArrayScalar_VAL(obj, @Name@)));
-#endif
-}
-/**end repeat**/
-
-/**begin repeat
- *
- * #name = (byte, ubyte, short, ushort, int, uint,
- * long, ulong, longlong, ulonglong,
- * half, float, double, longdouble,
- * cfloat, cdouble, clongdouble)*2#
- * #oper = oct*17, hex*17#
- * #kind = (int*5, long*5, int*2, long*2, int, long*2)*2#
- * #cap = (Int*5, Long*5, Int*2, Long*2, Int, Long*2)*2#
- */
-static PyObject *
-@name@_@oper@(PyObject *obj)
-{
- PyObject *pyint;
- pyint = @name@_@kind@(obj);
- if (pyint == NULL) {
- return NULL;
- }
- return Py@cap@_Type.tp_as_number->nb_@oper@(pyint);
-}
-/**end repeat**/
-
-#endif
-
/**begin repeat
* #oper = le, ge, lt, gt, eq, ne#
* #op = <=, >=, <, >, ==, !=#
@@ -1594,65 +1513,28 @@ static PyObject*
* cfloat, cdouble, clongdouble#
**/
static PyNumberMethods @name@_as_number = {
- (binaryfunc)@name@_add, /*nb_add*/
- (binaryfunc)@name@_subtract, /*nb_subtract*/
- (binaryfunc)@name@_multiply, /*nb_multiply*/
-#if !defined(NPY_PY3K)
- (binaryfunc)@name@_divide, /*nb_divide*/
-#endif
- (binaryfunc)@name@_remainder, /*nb_remainder*/
- (binaryfunc)@name@_divmod, /*nb_divmod*/
- (ternaryfunc)@name@_power, /*nb_power*/
- (unaryfunc)@name@_negative,
- (unaryfunc)@name@_positive, /*nb_pos*/
- (unaryfunc)@name@_absolute, /*nb_abs*/
-#if defined(NPY_PY3K)
- (inquiry)@name@_bool, /*nb_bool*/
-#else
- (inquiry)@name@_nonzero, /*nb_nonzero*/
-#endif
- (unaryfunc)@name@_invert, /*nb_invert*/
- (binaryfunc)@name@_lshift, /*nb_lshift*/
- (binaryfunc)@name@_rshift, /*nb_rshift*/
- (binaryfunc)@name@_and, /*nb_and*/
- (binaryfunc)@name@_xor, /*nb_xor*/
- (binaryfunc)@name@_or, /*nb_or*/
-#if !defined(NPY_PY3K)
- 0, /*nb_coerce*/
-#endif
- (unaryfunc)@name@_int, /*nb_int*/
-#if defined(NPY_PY3K)
- (unaryfunc)0, /*nb_reserved*/
-#else
- (unaryfunc)@name@_long, /*nb_long*/
-#endif
- (unaryfunc)@name@_float, /*nb_float*/
-#if !defined(NPY_PY3K)
- (unaryfunc)@name@_oct, /*nb_oct*/
- (unaryfunc)@name@_hex, /*nb_hex*/
-#endif
- 0, /*inplace_add*/
- 0, /*inplace_subtract*/
- 0, /*inplace_multiply*/
-#if !defined(NPY_PY3K)
- 0, /*inplace_divide*/
-#endif
- 0, /*inplace_remainder*/
- 0, /*inplace_power*/
- 0, /*inplace_lshift*/
- 0, /*inplace_rshift*/
- 0, /*inplace_and*/
- 0, /*inplace_xor*/
- 0, /*inplace_or*/
- (binaryfunc)@name@_floor_divide, /*nb_floor_divide*/
- (binaryfunc)@name@_true_divide, /*nb_true_divide*/
- 0, /*nb_inplace_floor_divide*/
- 0, /*nb_inplace_true_divide*/
- (unaryfunc)NULL, /*nb_index*/
-#if PY_VERSION_HEX >= 0x03050000
- 0, /*nb_matrix_multiply*/
- 0, /*nb_inplace_matrix_multiply*/
-#endif
+ .nb_add = (binaryfunc)@name@_add,
+ .nb_subtract = (binaryfunc)@name@_subtract,
+ .nb_multiply = (binaryfunc)@name@_multiply,
+ .nb_remainder = (binaryfunc)@name@_remainder,
+ .nb_divmod = (binaryfunc)@name@_divmod,
+ .nb_power = (ternaryfunc)@name@_power,
+ .nb_negative = (unaryfunc)@name@_negative,
+ .nb_positive = (unaryfunc)@name@_positive,
+ .nb_absolute = (unaryfunc)@name@_absolute,
+ .nb_bool = (inquiry)@name@_bool,
+ .nb_invert = (unaryfunc)@name@_invert,
+ .nb_lshift = (binaryfunc)@name@_lshift,
+ .nb_rshift = (binaryfunc)@name@_rshift,
+ .nb_and = (binaryfunc)@name@_and,
+ .nb_xor = (binaryfunc)@name@_xor,
+ .nb_or = (binaryfunc)@name@_or,
+ .nb_int = (unaryfunc)@name@_int,
+ .nb_float = (unaryfunc)@name@_float,
+ .nb_floor_divide = (binaryfunc)@name@_floor_divide,
+ .nb_true_divide = (binaryfunc)@name@_true_divide,
+ /* TODO: This struct/initialization should not be split between files */
+ .nb_index = (unaryfunc)NULL, /* set in add_scalarmath below */
};
/**end repeat**/
diff --git a/numpy/core/src/umath/simd.inc.src b/numpy/core/src/umath/simd.inc.src
index 74f52cc9d..4265476b5 100644
--- a/numpy/core/src/umath/simd.inc.src
+++ b/numpy/core/src/umath/simd.inc.src
@@ -34,6 +34,21 @@
#define VECTOR_SIZE_BYTES 16
+/*
+ * MAX_STEP_SIZE is used to determine if we need to use SIMD version of the ufunc.
+ * Very large step size can be as slow as processing it using scalar. The
+ * value of 2097152 ( = 2MB) was chosen using 2 considerations:
+ * 1) Typical linux kernel page size is 4Kb, but sometimes it could also be 2MB
+ * which is == 2097152 Bytes. For a step size as large as this, surely all
+ * the loads/stores of gather/scatter instructions falls on 16 different pages
+ * which one would think would slow down gather/scatter instructions.
+ * 2) It additionally satisfies MAX_STEP_SIZE*16/esize < NPY_MAX_INT32 which
+ * allows us to use i32 version of gather/scatter (as opposed to the i64 version)
+ * without problems (step larger than NPY_MAX_INT32*esize/16 would require use of
+ * i64gather/scatter). esize = element size = 4/8 bytes for float/double.
+ */
+#define MAX_STEP_SIZE 2097152
+
static NPY_INLINE npy_uintp
abs_ptrdiff(char *a, char *b)
{
@@ -41,6 +56,44 @@ abs_ptrdiff(char *a, char *b)
}
/*
+ * nomemoverlap - returns true if two strided arrays have an overlapping
+ * region in memory. ip_size/op_size = size of the arrays which can be negative
+ * indicating negative steps.
+ */
+static NPY_INLINE npy_bool
+nomemoverlap(char *ip,
+ npy_intp ip_size,
+ char *op,
+ npy_intp op_size)
+{
+ char *ip_start, *ip_end, *op_start, *op_end;
+ if (ip_size < 0) {
+ ip_start = ip + ip_size;
+ ip_end = ip;
+ }
+ else {
+ ip_start = ip;
+ ip_end = ip + ip_size;
+ }
+ if (op_size < 0) {
+ op_start = op + op_size;
+ op_end = op;
+ }
+ else {
+ op_start = op;
+ op_end = op + op_size;
+ }
+ return (ip_start > op_end) | (op_start > ip_end);
+}
+
+#define IS_BINARY_STRIDE_ONE(esize, vsize) \
+ ((steps[0] == esize) && \
+ (steps[1] == esize) && \
+ (steps[2] == esize) && \
+ (abs_ptrdiff(args[2], args[0]) >= vsize) && \
+ (abs_ptrdiff(args[2], args[1]) >= vsize))
+
+/*
* stride is equal to element size and input and destination are equal or
* don't overlap within one register. The check of the steps against
* esize also quarantees that steps are >= 0.
@@ -52,13 +105,34 @@ abs_ptrdiff(char *a, char *b)
((abs_ptrdiff(args[1], args[0]) == 0))))
/*
- * output should be contiguous, can handle strided input data
+ * Avoid using SIMD for very large step sizes for several reasons:
+ * 1) Supporting large step sizes requires use of i64gather/scatter_ps instructions,
+ * in which case we need two i64gather instructions and an additional vinsertf32x8
+ * instruction to load a single zmm register (since one i64gather instruction
+ * loads into a ymm register). This is not ideal for performance.
+ * 2) Gather and scatter instructions can be slow when the loads/stores
+ * cross page boundaries.
+ *
+ * We instead rely on i32gather/scatter_ps instructions which use a 32-bit index
+ * element. The index needs to be < INT_MAX to avoid overflow. MAX_STEP_SIZE
+ * ensures this. The condition also requires that the input and output arrays
+ * should have no overlap in memory.
+ */
+#define IS_BINARY_SMALL_STEPS_AND_NOMEMOVERLAP \
+ ((abs(steps[0]) < MAX_STEP_SIZE) && \
+ (abs(steps[1]) < MAX_STEP_SIZE) && \
+ (abs(steps[2]) < MAX_STEP_SIZE) && \
+ (nomemoverlap(args[0], steps[0] * dimensions[0], args[2], steps[2] * dimensions[0])) && \
+ (nomemoverlap(args[1], steps[1] * dimensions[0], args[2], steps[2] * dimensions[0])))
+
+/*
+ * 1) Output should be contiguous, can handle strided input data
+ * 2) Input step should be smaller than MAX_STEP_SIZE for performance
+ * 3) Input and output arrays should have no overlap in memory
*/
#define IS_OUTPUT_BLOCKABLE_UNARY(esize, vsize) \
- (steps[1] == (esize) && \
- (npy_is_aligned(args[0], esize) && npy_is_aligned(args[1], esize)) && \
- ((abs_ptrdiff(args[1], args[0]) >= (vsize)) || \
- ((abs_ptrdiff(args[1], args[0]) == 0))))
+ (steps[1] == (esize) && abs(steps[0]) < MAX_STEP_SIZE && \
+ (nomemoverlap(args[1], steps[1] * dimensions[0], args[0], steps[0] * dimensions[0])))
#define IS_BLOCKABLE_REDUCE(esize, vsize) \
(steps[1] == (esize) && abs_ptrdiff(args[1], args[0]) >= (vsize) && \
@@ -125,11 +199,109 @@ abs_ptrdiff(char *a, char *b)
/*
*****************************************************************************
+ ** CMPLX DISPATCHERS
+ *****************************************************************************
+ */
+
+/**begin repeat
+ * #TYPE = CFLOAT, CDOUBLE#
+ * #type= npy_float, npy_double#
+ * #esize = 8, 16#
+ */
+
+/**begin repeat1
+ * #func = add, subtract, multiply#
+ */
+
+#if defined HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS && defined NPY_HAVE_SSE2_INTRINSICS
+static NPY_INLINE NPY_GCC_TARGET_AVX512F void
+AVX512F_@func@_@TYPE@(char **args, const npy_intp *dimensions, const npy_intp *steps);
+#endif
+
+static NPY_INLINE int
+run_binary_avx512f_@func@_@TYPE@(char **args, const npy_intp *dimensions, const npy_intp *steps)
+{
+#if defined HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS && defined NPY_HAVE_SSE2_INTRINSICS
+ if (IS_BINARY_STRIDE_ONE(@esize@, 64)) {
+ AVX512F_@func@_@TYPE@(args, dimensions, steps);
+ return 1;
+ }
+ else
+ return 0;
+#endif
+ return 0;
+}
+
+/**end repeat1**/
+
+/**begin repeat1
+ * #func = square, absolute, conjugate#
+ * #outsize = 1, 2, 1#
+ * #max_stride = 2, 8, 8#
+ */
+
+#if defined HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS && defined NPY_HAVE_SSE2_INTRINSICS
+static NPY_INLINE NPY_GCC_TARGET_AVX512F void
+AVX512F_@func@_@TYPE@(@type@*, @type@*, const npy_intp n, const npy_intp stride);
+#endif
+
+static NPY_INLINE int
+run_unary_avx512f_@func@_@TYPE@(char **args, const npy_intp *dimensions, const npy_intp *steps)
+{
+#if defined HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS && defined NPY_HAVE_SSE2_INTRINSICS
+ if ((IS_OUTPUT_BLOCKABLE_UNARY((npy_uint)(@esize@/@outsize@), 64)) && (labs(steps[0]) < 2*@max_stride@*@esize@)) {
+ AVX512F_@func@_@TYPE@((@type@*)args[1], (@type@*)args[0], dimensions[0], steps[0]);
+ return 1;
+ }
+ else
+ return 0;
+#endif
+ return 0;
+}
+
+/**end repeat1**/
+/**end repeat**/
+
+/*
+ *****************************************************************************
** FLOAT DISPATCHERS
*****************************************************************************
*/
/**begin repeat
+ * #type = npy_float, npy_double, npy_longdouble#
+ * #TYPE = FLOAT, DOUBLE, LONGDOUBLE#
+ * #EXISTS = 1, 1, 0#
+ */
+
+/**begin repeat1
+ * #func = maximum, minimum#
+ */
+
+#if defined HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS && defined NPY_HAVE_SSE2_INTRINSICS && @EXISTS@
+static NPY_INLINE NPY_GCC_TARGET_AVX512F void
+AVX512F_@func@_@TYPE@(char **args, npy_intp const *dimensions, npy_intp const *steps);
+#endif
+
+static NPY_INLINE int
+run_binary_avx512f_@func@_@TYPE@(char **args, npy_intp const *dimensions, npy_intp const *steps)
+{
+#if defined HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS && defined NPY_HAVE_SSE2_INTRINSICS && @EXISTS@
+ if (IS_BINARY_SMALL_STEPS_AND_NOMEMOVERLAP) {
+ AVX512F_@func@_@TYPE@(args, dimensions, steps);
+ return 1;
+ }
+ else
+ return 0;
+#endif
+ return 0;
+}
+
+
+/**end repeat1**/
+/**end repeat**/
+
+/**begin repeat
* #ISA = FMA, AVX512F#
* #isa = fma, avx512f#
* #CHK = HAVE_ATTRIBUTE_TARGET_AVX2_WITH_INTRINSICS, HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS#
@@ -153,7 +325,7 @@ static NPY_INLINE NPY_GCC_TARGET_@ISA@ void
#endif
static NPY_INLINE int
-run_unary_@isa@_@func@_@TYPE@(char **args, npy_intp *dimensions, npy_intp *steps)
+run_unary_@isa@_@func@_@TYPE@(char **args, npy_intp const *dimensions, npy_intp const *steps)
{
#if defined @CHK@ && defined NPY_HAVE_SSE2_INTRINSICS
if (IS_OUTPUT_BLOCKABLE_UNARY(sizeof(@type@), @REGISTER_SIZE@)) {
@@ -179,7 +351,7 @@ static NPY_INLINE void
#endif
static NPY_INLINE int
-run_unary_@isa@_@func@_FLOAT(char **args, npy_intp *dimensions, npy_intp *steps)
+run_unary_@isa@_@func@_FLOAT(char **args, npy_intp const *dimensions, npy_intp const *steps)
{
#if defined @CHK@ && defined NPY_HAVE_SSE2_INTRINSICS
if (IS_OUTPUT_BLOCKABLE_UNARY(sizeof(npy_float), @REGISTER_SIZE@)) {
@@ -200,7 +372,7 @@ static NPY_INLINE void
#endif
static NPY_INLINE int
-run_unary_@isa@_sincos_FLOAT(char **args, npy_intp *dimensions, npy_intp *steps, NPY_TRIG_OP my_trig_op)
+run_unary_@isa@_sincos_FLOAT(char **args, npy_intp const *dimensions, npy_intp const *steps, NPY_TRIG_OP my_trig_op)
{
#if defined @CHK@ && defined NPY_HAVE_SSE2_INTRINSICS
if (IS_OUTPUT_BLOCKABLE_UNARY(sizeof(npy_float), @REGISTER_SIZE@)) {
@@ -238,7 +410,7 @@ sse2_@func@_@TYPE@(@type@ *, @type@ *, const npy_intp n);
#endif
static NPY_INLINE int
-run_@name@_simd_@func@_@TYPE@(char **args, npy_intp *dimensions, npy_intp *steps)
+run_@name@_simd_@func@_@TYPE@(char **args, npy_intp const *dimensions, npy_intp const *steps)
{
#if @vector@ && defined NPY_HAVE_SSE2_INTRINSICS
if (@check@(sizeof(@type@), VECTOR_SIZE_BYTES)) {
@@ -272,7 +444,7 @@ sse2_binary_scalar2_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2,
#endif
static NPY_INLINE int
-run_binary_simd_@kind@_@TYPE@(char **args, npy_intp *dimensions, npy_intp *steps)
+run_binary_simd_@kind@_@TYPE@(char **args, npy_intp const *dimensions, npy_intp const *steps)
{
#if @vector@ && defined NPY_HAVE_SSE2_INTRINSICS
@type@ * ip1 = (@type@ *)args[0];
@@ -280,11 +452,11 @@ run_binary_simd_@kind@_@TYPE@(char **args, npy_intp *dimensions, npy_intp *steps
@type@ * op = (@type@ *)args[2];
npy_intp n = dimensions[0];
#if defined __AVX512F__
- const npy_intp vector_size_bytes = 64;
+ const npy_uintp vector_size_bytes = 64;
#elif defined __AVX2__
- const npy_intp vector_size_bytes = 32;
+ const npy_uintp vector_size_bytes = 32;
#else
- const npy_intp vector_size_bytes = 32;
+ const npy_uintp vector_size_bytes = 32;
#endif
/* argument one scalar */
if (IS_BLOCKABLE_BINARY_SCALAR1(sizeof(@type@), vector_size_bytes)) {
@@ -328,7 +500,7 @@ sse2_binary_scalar2_@kind@_@TYPE@(npy_bool * op, @type@ * ip1, @type@ * ip2,
#endif
static NPY_INLINE int
-run_binary_simd_@kind@_@TYPE@(char **args, npy_intp *dimensions, npy_intp *steps)
+run_binary_simd_@kind@_@TYPE@(char **args, npy_intp const *dimensions, npy_intp const *steps)
{
#if @vector@ && @simd@ && defined NPY_HAVE_SSE2_INTRINSICS
@type@ * ip1 = (@type@ *)args[0];
@@ -367,7 +539,7 @@ sse2_@kind@_@TYPE@(npy_bool * op, @type@ * ip1, npy_intp n);
#endif
static NPY_INLINE int
-run_@kind@_simd_@TYPE@(char **args, npy_intp *dimensions, npy_intp *steps)
+run_@kind@_simd_@TYPE@(char **args, npy_intp const *dimensions, npy_intp const *steps)
{
#if @vector@ && defined NPY_HAVE_SSE2_INTRINSICS
if (steps[0] == sizeof(@type@) && steps[1] == 1 &&
@@ -403,7 +575,7 @@ sse2_reduce_@kind@_BOOL(npy_bool * op, npy_bool * ip, npy_intp n);
#endif
static NPY_INLINE int
-run_binary_simd_@kind@_BOOL(char **args, npy_intp *dimensions, npy_intp *steps)
+run_binary_simd_@kind@_BOOL(char **args, npy_intp const *dimensions, npy_intp const *steps)
{
#if defined NPY_HAVE_SSE2_INTRINSICS
if (sizeof(npy_bool) == 1 &&
@@ -418,7 +590,7 @@ run_binary_simd_@kind@_BOOL(char **args, npy_intp *dimensions, npy_intp *steps)
static NPY_INLINE int
-run_reduce_simd_@kind@_BOOL(char **args, npy_intp *dimensions, npy_intp *steps)
+run_reduce_simd_@kind@_BOOL(char **args, npy_intp const *dimensions, npy_intp const *steps)
{
#if defined NPY_HAVE_SSE2_INTRINSICS
if (sizeof(npy_bool) == 1 &&
@@ -443,7 +615,7 @@ sse2_@kind@_BOOL(npy_bool *, npy_bool *, const npy_intp n);
#endif
static NPY_INLINE int
-run_unary_simd_@kind@_BOOL(char **args, npy_intp *dimensions, npy_intp *steps)
+run_unary_simd_@kind@_BOOL(char **args, npy_intp const *dimensions, npy_intp const *steps)
{
#if defined NPY_HAVE_SSE2_INTRINSICS
if (sizeof(npy_bool) == 1 &&
@@ -1134,7 +1306,7 @@ sse2_@kind@_@TYPE@(@type@ * ip, @type@ * op, const npy_intp n)
/* Order of operations important for MSVC 2015 */
*op = (*op @OP@ ip[i] || npy_isnan(*op)) ? *op : ip[i];
}
- assert((npy_uintp)n < (stride) || npy_is_aligned(&ip[i], VECTOR_SIZE_BYTES));
+ assert(n < stride || npy_is_aligned(&ip[i], VECTOR_SIZE_BYTES));
if (i + 3 * stride <= n) {
/* load the first elements */
@vtype@ c1 = @vpre@_load_@vsuf@((@type@*)&ip[i]);
@@ -1525,9 +1697,17 @@ avx512_scalef_ps(__m512 poly, __m512 quadrant)
}
/**begin repeat
* #vsub = ps, pd#
+ * #type= npy_float, npy_double#
* #epi_vsub = epi32, epi64#
* #vtype = __m512, __m512d#
+ * #mask = __mmask16, __mmask8#
* #and_const = 0x7fffffff, 0x7fffffffffffffffLL#
+ * #neg_mask = 0x80000000, 0x8000000000000000#
+ * #perm_ = 0xb1, 0x55#
+ * #cmpx_img_mask = 0xAAAA, 0xAA#
+ * #cmpx_re_mask = 0x5555, 0x55#
+ * #INF = NPY_INFINITYF, NPY_INFINITY#
+ * #NAN = NPY_NANF, NPY_NAN#
*/
static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F @vtype@
avx512_abs_@vsub@(@vtype@ x)
@@ -1565,6 +1745,96 @@ avx512_trunc_@vsub@(@vtype@ x)
{
return _mm512_roundscale_@vsub@(x, 0x0B);
}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F @vtype@
+avx512_hadd_@vsub@(const @vtype@ x)
+{
+ return _mm512_add_@vsub@(x, _mm512_permute_@vsub@(x, @perm_@));
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F @vtype@
+avx512_hsub_@vsub@(const @vtype@ x)
+{
+ return _mm512_sub_@vsub@(x, _mm512_permute_@vsub@(x, @perm_@));
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F @vtype@
+avx512_cabsolute_@vsub@(const @vtype@ x1,
+ const @vtype@ x2,
+ const __m512i re_indices,
+ const __m512i im_indices)
+{
+ @vtype@ inf = _mm512_set1_@vsub@(@INF@);
+ @vtype@ nan = _mm512_set1_@vsub@(@NAN@);
+ @vtype@ x1_abs = avx512_abs_@vsub@(x1);
+ @vtype@ x2_abs = avx512_abs_@vsub@(x2);
+ @vtype@ re = _mm512_permutex2var_@vsub@(x1_abs, re_indices, x2_abs);
+ @vtype@ im = _mm512_permutex2var_@vsub@(x1_abs, im_indices , x2_abs);
+ /*
+ * If real or imag = INF, then convert it to inf + j*inf
+ * Handles: inf + j*nan, nan + j*inf
+ */
+ @mask@ re_infmask = _mm512_cmp_@vsub@_mask(re, inf, _CMP_EQ_OQ);
+ @mask@ im_infmask = _mm512_cmp_@vsub@_mask(im, inf, _CMP_EQ_OQ);
+ im = _mm512_mask_mov_@vsub@(im, re_infmask, inf);
+ re = _mm512_mask_mov_@vsub@(re, im_infmask, inf);
+
+ /*
+ * If real or imag = NAN, then convert it to nan + j*nan
+ * Handles: x + j*nan, nan + j*x
+ */
+ @mask@ re_nanmask = _mm512_cmp_@vsub@_mask(re, re, _CMP_NEQ_UQ);
+ @mask@ im_nanmask = _mm512_cmp_@vsub@_mask(im, im, _CMP_NEQ_UQ);
+ im = _mm512_mask_mov_@vsub@(im, re_nanmask, nan);
+ re = _mm512_mask_mov_@vsub@(re, im_nanmask, nan);
+
+ @vtype@ larger = _mm512_max_@vsub@(re, im);
+ @vtype@ smaller = _mm512_min_@vsub@(im, re);
+
+ /*
+ * Calculate div_mask to prevent 0./0. and inf/inf operations in div
+ */
+ @mask@ zeromask = _mm512_cmp_@vsub@_mask(larger, _mm512_setzero_@vsub@(), _CMP_EQ_OQ);
+ @mask@ infmask = _mm512_cmp_@vsub@_mask(smaller, inf, _CMP_EQ_OQ);
+ @mask@ div_mask = _mm512_knot(_mm512_kor(zeromask, infmask));
+ @vtype@ ratio = _mm512_maskz_div_@vsub@(div_mask, smaller, larger);
+ @vtype@ hypot = _mm512_sqrt_@vsub@(_mm512_fmadd_@vsub@(
+ ratio, ratio, _mm512_set1_@vsub@(1.0f)));
+ return _mm512_mul_@vsub@(hypot, larger);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F @vtype@
+avx512_conjugate_@vsub@(const @vtype@ x)
+{
+ /*
+ * _mm512_mask_xor_ps/pd requires AVX512DQ. We cast it to __m512i and
+ * use the xor_epi32/64 instruction instead. Cast is a zero latency instruction
+ */
+ __m512i cast_x = _mm512_cast@vsub@_si512(x);
+ __m512i res = _mm512_mask_xor_@epi_vsub@(cast_x, @cmpx_img_mask@,
+ cast_x, _mm512_set1_@epi_vsub@(@neg_mask@));
+ return _mm512_castsi512_@vsub@(res);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F @vtype@
+avx512_cmul_@vsub@(@vtype@ x1, @vtype@ x2)
+{
+ // x1 = r1, i1
+ // x2 = r2, i2
+ @vtype@ x3 = _mm512_permute_@vsub@(x2, @perm_@); // i2, r2
+ @vtype@ x12 = _mm512_mul_@vsub@(x1, x2); // r1*r2, i1*i2
+ @vtype@ x13 = _mm512_mul_@vsub@(x1, x3); // r1*i2, r2*i1
+ @vtype@ outreal = avx512_hsub_@vsub@(x12); // r1*r2 - i1*i2, r1*r2 - i1*i2
+ @vtype@ outimg = avx512_hadd_@vsub@(x13); // r1*i2 + i1*r2, r1*i2 + i1*r2
+ return _mm512_mask_blend_@vsub@(@cmpx_img_mask@, outreal, outimg);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F @vtype@
+avx512_csquare_@vsub@(@vtype@ x)
+{
+ return avx512_cmul_@vsub@(x, x);
+}
+
/**end repeat**/
#endif
@@ -1671,6 +1941,101 @@ static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ @vtype@d
#endif
/**end repeat**/
+/**begin repeat
+ * #type = npy_float, npy_double#
+ * #TYPE = FLOAT, DOUBLE#
+ * #num_lanes = 16, 8#
+ * #vsuffix = ps, pd#
+ * #mask = __mmask16, __mmask8#
+ * #vtype = __m512, __m512d#
+ * #scale = 4, 8#
+ * #vindextype = __m512i, __m256i#
+ * #vindexsize = 512, 256#
+ * #vindexload = _mm512_loadu_si512, _mm256_loadu_si256#
+ */
+
+/**begin repeat1
+ * #func = maximum, minimum#
+ * #vectorf = max, min#
+ */
+
+#if defined HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS && defined NPY_HAVE_SSE2_INTRINSICS
+static NPY_INLINE NPY_GCC_TARGET_AVX512F void
+AVX512F_@func@_@TYPE@(char **args, npy_intp const *dimensions, npy_intp const *steps)
+{
+ const npy_intp stride_ip1 = steps[0]/(npy_intp)sizeof(@type@);
+ const npy_intp stride_ip2 = steps[1]/(npy_intp)sizeof(@type@);
+ const npy_intp stride_op = steps[2]/(npy_intp)sizeof(@type@);
+ const npy_intp array_size = dimensions[0];
+ npy_intp num_remaining_elements = array_size;
+ @type@* ip1 = (@type@*) args[0];
+ @type@* ip2 = (@type@*) args[1];
+ @type@* op = (@type@*) args[2];
+
+ @mask@ load_mask = avx512_get_full_load_mask_@vsuffix@();
+
+ /*
+ * Note: while generally indices are npy_intp, we ensure that our maximum index
+ * will fit in an int32 as a precondition for this function via
+ * IS_BINARY_SMALL_STEPS_AND_NOMEMOVERLAP
+ */
+
+ npy_int32 index_ip1[@num_lanes@], index_ip2[@num_lanes@], index_op[@num_lanes@];
+ for (npy_int32 ii = 0; ii < @num_lanes@; ii++) {
+ index_ip1[ii] = ii*stride_ip1;
+ index_ip2[ii] = ii*stride_ip2;
+ index_op[ii] = ii*stride_op;
+ }
+ @vindextype@ vindex_ip1 = @vindexload@((@vindextype@*)&index_ip1[0]);
+ @vindextype@ vindex_ip2 = @vindexload@((@vindextype@*)&index_ip2[0]);
+ @vindextype@ vindex_op = @vindexload@((@vindextype@*)&index_op[0]);
+ @vtype@ zeros_f = _mm512_setzero_@vsuffix@();
+
+ while (num_remaining_elements > 0) {
+ if (num_remaining_elements < @num_lanes@) {
+ load_mask = avx512_get_partial_load_mask_@vsuffix@(
+ num_remaining_elements, @num_lanes@);
+ }
+ @vtype@ x1, x2;
+ if (stride_ip1 == 1) {
+ x1 = avx512_masked_load_@vsuffix@(load_mask, ip1);
+ }
+ else {
+ x1 = avx512_masked_gather_@vsuffix@(zeros_f, ip1, vindex_ip1, load_mask);
+ }
+ if (stride_ip2 == 1) {
+ x2 = avx512_masked_load_@vsuffix@(load_mask, ip2);
+ }
+ else {
+ x2 = avx512_masked_gather_@vsuffix@(zeros_f, ip2, vindex_ip2, load_mask);
+ }
+
+ /*
+ * when only one of the arguments is a nan, the maxps/maxpd instruction
+ * returns the second argument. The additional blend instruction fixes
+ * this issue to conform with NumPy behaviour.
+ */
+ @mask@ nan_mask = _mm512_cmp_@vsuffix@_mask(x1, x1, _CMP_NEQ_UQ);
+ @vtype@ out = _mm512_@vectorf@_@vsuffix@(x1, x2);
+ out = _mm512_mask_blend_@vsuffix@(nan_mask, out, x1);
+
+ if (stride_op == 1) {
+ _mm512_mask_storeu_@vsuffix@(op, load_mask, out);
+ }
+ else {
+ /* scatter! */
+ _mm512_mask_i32scatter_@vsuffix@(op, load_mask, vindex_op, out, @scale@);
+ }
+
+ ip1 += @num_lanes@*stride_ip1;
+ ip2 += @num_lanes@*stride_ip2;
+ op += @num_lanes@*stride_op;
+ num_remaining_elements -= @num_lanes@;
+ }
+}
+#endif
+/**end repeat**/
+/**end repeat1**/
/**begin repeat
* #ISA = FMA, AVX512F#
@@ -1699,16 +2064,23 @@ static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void
const npy_intp array_size,
const npy_intp steps)
{
- const npy_intp stride = steps/sizeof(npy_float);
- const npy_int num_lanes = @BYTES@/sizeof(npy_float);
+ const npy_intp stride = steps/(npy_intp)sizeof(npy_float);
+ const npy_int num_lanes = @BYTES@/(npy_intp)sizeof(npy_float);
npy_intp num_remaining_elements = array_size;
@vtype@ ones_f = _mm@vsize@_set1_ps(1.0f);
@mask@ load_mask = @isa@_get_full_load_mask_ps();
#if @replace_0_with_1@
@mask@ inv_load_mask = @isa@_invert_mask_ps(load_mask);
#endif
- npy_int indexarr[16];
- for (npy_int ii = 0; ii < 16; ii++) {
+
+ /*
+ * Note: while generally indices are npy_intp, we ensure that our maximum index
+ * will fit in an int32 as a precondition for this function via
+ * IS_OUTPUT_BLOCKABLE_UNARY
+ */
+
+ npy_int32 indexarr[16];
+ for (npy_int32 ii = 0; ii < 16; ii++) {
indexarr[ii] = ii*stride;
}
@vtype@i vindex = _mm@vsize@_loadu_si@vsize@((@vtype@i*)&indexarr[0]);
@@ -1778,16 +2150,22 @@ static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void
const npy_intp array_size,
const npy_intp steps)
{
- const npy_intp stride = steps/sizeof(npy_double);
- const npy_int num_lanes = @BYTES@/sizeof(npy_double);
+ const npy_intp stride = steps/(npy_intp)sizeof(npy_double);
+ const npy_int num_lanes = @BYTES@/(npy_intp)sizeof(npy_double);
npy_intp num_remaining_elements = array_size;
@mask@ load_mask = @isa@_get_full_load_mask_pd();
#if @replace_0_with_1@
@mask@ inv_load_mask = @isa@_invert_mask_pd(load_mask);
#endif
@vtype@ ones_d = _mm@vsize@_set1_pd(1.0f);
- npy_int indexarr[8];
- for (npy_int ii = 0; ii < 8; ii++) {
+
+ /*
+ * Note: while generally indices are npy_intp, we ensure that our maximum index
+ * will fit in an int32 as a precondition for this function via
+ * IS_OUTPUT_BLOCKABLE_UNARY
+ */
+ npy_int32 indexarr[8];
+ for (npy_int32 ii = 0; ii < 8; ii++) {
indexarr[ii] = ii*stride;
}
@vindextype@ vindex = @vindexload@((@vindextype@*)&indexarr[0]);
@@ -1856,7 +2234,7 @@ static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void
* method: x* = x - y*PI/2, where y = rint(x*2/PI). x* \in [-PI/4, PI/4].
* (3) Map cos(x) to (+/-)sine or (+/-)cosine of x* based on the quadrant k =
* int(y).
- * (4) For elements outside that range, Cody-Waite reduction peforms poorly
+ * (4) For elements outside that range, Cody-Waite reduction performs poorly
* leading to catastrophic cancellation. We compute cosine by calling glibc in
* a scalar fashion.
* (5) Vectorized implementation has a max ULP of 1.49 and performs at least
@@ -1874,8 +2252,8 @@ static NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void
const npy_intp steps,
NPY_TRIG_OP my_trig_op)
{
- const npy_intp stride = steps/sizeof(npy_float);
- const npy_int num_lanes = @BYTES@/sizeof(npy_float);
+ const npy_intp stride = steps/(npy_intp)sizeof(npy_float);
+ const npy_int num_lanes = @BYTES@/(npy_intp)sizeof(npy_float);
npy_float large_number = 71476.0625f;
if (my_trig_op == npy_compute_sin) {
large_number = 117435.992f;
@@ -1905,8 +2283,14 @@ static NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void
@mask@ nan_mask, glibc_mask, sine_mask, negate_mask;
@mask@ load_mask = @isa@_get_full_load_mask_ps();
npy_intp num_remaining_elements = array_size;
- npy_int indexarr[16];
- for (npy_int ii = 0; ii < 16; ii++) {
+
+ /*
+ * Note: while generally indices are npy_intp, we ensure that our maximum index
+ * will fit in an int32 as a precondition for this function via
+ * IS_OUTPUT_BLOCKABLE_UNARY
+ */
+ npy_int32 indexarr[16];
+ for (npy_int32 ii = 0; ii < 16; ii++) {
indexarr[ii] = ii*stride;
}
@vtype@i vindex = _mm@vsize@_loadu_si@vsize@((@vtype@i*)&indexarr[0]);
@@ -2017,12 +2401,18 @@ static NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void
const npy_intp array_size,
const npy_intp steps)
{
- const npy_intp stride = steps/sizeof(npy_float);
- const npy_int num_lanes = @BYTES@/sizeof(npy_float);
+ const npy_intp stride = steps/(npy_intp)sizeof(npy_float);
+ const npy_int num_lanes = @BYTES@/(npy_intp)sizeof(npy_float);
npy_float xmax = 88.72283935546875f;
npy_float xmin = -103.97208404541015625f;
- npy_int indexarr[16];
- for (npy_int ii = 0; ii < 16; ii++) {
+
+ /*
+ * Note: while generally indices are npy_intp, we ensure that our maximum index
+ * will fit in an int32 as a precondition for this function via
+ * IS_OUTPUT_BLOCKABLE_UNARY
+ */
+ npy_int32 indexarr[16];
+ for (npy_int32 ii = 0; ii < 16; ii++) {
indexarr[ii] = ii*stride;
}
@@ -2143,10 +2533,16 @@ static NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void
const npy_intp array_size,
const npy_intp steps)
{
- const npy_intp stride = steps/sizeof(npy_float);
- const npy_int num_lanes = @BYTES@/sizeof(npy_float);
- npy_int indexarr[16];
- for (npy_int ii = 0; ii < 16; ii++) {
+ const npy_intp stride = steps/(npy_intp)sizeof(npy_float);
+ const npy_int num_lanes = @BYTES@/(npy_intp)sizeof(npy_float);
+
+ /*
+ * Note: while generally indices are npy_intp, we ensure that our maximum index
+ * will fit in an int32 as a precondition for this function via
+ * IS_OUTPUT_BLOCKABLE_UNARY
+ */
+ npy_int32 indexarr[16];
+ for (npy_int32 ii = 0; ii < 16; ii++) {
indexarr[ii] = ii*stride;
}
@@ -2258,6 +2654,184 @@ static NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void
#endif
/**end repeat**/
+/**begin repeat
+ * #TYPE = CFLOAT, CDOUBLE#
+ * #type = npy_float, npy_double#
+ * #num_lanes = 16, 8#
+ * #vsuffix = ps, pd#
+ * #epi_vsub = epi32, epi64#
+ * #mask = __mmask16, __mmask8#
+ * #vtype = __m512, __m512d#
+ * #scale = 4, 8#
+ * #vindextype = __m512i, __m256i#
+ * #vindexload = _mm512_loadu_si512, _mm256_loadu_si256#
+ * #storemask = 0xFF, 0xF#
+ * #IS_FLOAT = 1, 0#
+ */
+
+/**begin repeat1
+ * #func = add, subtract, multiply#
+ * #vectorf = _mm512_add, _mm512_sub, avx512_cmul#
+ */
+
+#if defined HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS && defined NPY_HAVE_SSE2_INTRINSICS
+static NPY_GCC_OPT_3 NPY_INLINE NPY_GCC_TARGET_AVX512F void
+AVX512F_@func@_@TYPE@(char **args, const npy_intp *dimensions, const npy_intp *steps)
+{
+ const npy_intp array_size = dimensions[0];
+ npy_intp num_remaining_elements = 2*array_size;
+ @type@* ip1 = (@type@*) args[0];
+ @type@* ip2 = (@type@*) args[1];
+ @type@* op = (@type@*) args[2];
+
+ @mask@ load_mask = avx512_get_full_load_mask_@vsuffix@();
+
+ while (num_remaining_elements > 0) {
+ if (num_remaining_elements < @num_lanes@) {
+ load_mask = avx512_get_partial_load_mask_@vsuffix@(
+ num_remaining_elements, @num_lanes@);
+ }
+ @vtype@ x1, x2;
+ x1 = avx512_masked_load_@vsuffix@(load_mask, ip1);
+ x2 = avx512_masked_load_@vsuffix@(load_mask, ip2);
+
+ @vtype@ out = @vectorf@_@vsuffix@(x1, x2);
+
+ _mm512_mask_storeu_@vsuffix@(op, load_mask, out);
+
+ ip1 += @num_lanes@;
+ ip2 += @num_lanes@;
+ op += @num_lanes@;
+ num_remaining_elements -= @num_lanes@;
+ }
+}
+#endif
+/**end repeat1**/
+
+/**begin repeat1
+ * #func = square, conjugate#
+ * #vectorf = avx512_csquare, avx512_conjugate#
+ */
+
+#if defined HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS && defined NPY_HAVE_SSE2_INTRINSICS
+static NPY_GCC_OPT_3 NPY_INLINE NPY_GCC_TARGET_AVX512F void
+AVX512F_@func@_@TYPE@(@type@ * op,
+ @type@ * ip,
+ const npy_intp array_size,
+ const npy_intp steps)
+{
+ npy_intp num_remaining_elements = 2*array_size;
+ const npy_intp stride_ip1 = steps/(npy_intp)sizeof(@type@)/2;
+
+ /*
+ * Note: while generally indices are npy_intp, we ensure that our maximum index
+ * will fit in an int32 as a precondition for this function via max_stride
+ */
+ npy_int32 index_ip1[16];
+ for (npy_int32 ii = 0; ii < @num_lanes@; ii=ii+2) {
+ index_ip1[ii] = ii*stride_ip1;
+ index_ip1[ii+1] = ii*stride_ip1 + 1;
+ }
+ @vindextype@ vindex = @vindexload@((@vindextype@*)index_ip1);
+ @mask@ load_mask = avx512_get_full_load_mask_@vsuffix@();
+ @vtype@ zeros = _mm512_setzero_@vsuffix@();
+
+ while (num_remaining_elements > 0) {
+ if (num_remaining_elements < @num_lanes@) {
+ load_mask = avx512_get_partial_load_mask_@vsuffix@(
+ num_remaining_elements, @num_lanes@);
+ }
+ @vtype@ x1;
+ if (stride_ip1 == 1) {
+ x1 = avx512_masked_load_@vsuffix@(load_mask, ip);
+ }
+ else {
+ x1 = avx512_masked_gather_@vsuffix@(zeros, ip, vindex, load_mask);
+ }
+
+ @vtype@ out = @vectorf@_@vsuffix@(x1);
+
+ _mm512_mask_storeu_@vsuffix@(op, load_mask, out);
+ op += @num_lanes@;
+ ip += @num_lanes@*stride_ip1;
+ num_remaining_elements -= @num_lanes@;
+ }
+}
+#endif
+/**end repeat1**/
+
+#if defined HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS && defined NPY_HAVE_SSE2_INTRINSICS
+static NPY_GCC_OPT_3 NPY_INLINE NPY_GCC_TARGET_AVX512F void
+AVX512F_absolute_@TYPE@(@type@ * op,
+ @type@ * ip,
+ const npy_intp array_size,
+ const npy_intp steps)
+{
+ npy_intp num_remaining_elements = 2*array_size;
+ const npy_intp stride_ip1 = steps/(npy_intp)sizeof(@type@)/2;
+
+ /*
+ * Note: while generally indices are npy_intp, we ensure that our maximum index
+ * will fit in an int32 as a precondition for this function via max_stride
+ */
+ npy_int32 index_ip[32];
+ for (npy_int32 ii = 0; ii < 2*@num_lanes@; ii=ii+2) {
+ index_ip[ii] = ii*stride_ip1;
+ index_ip[ii+1] = ii*stride_ip1 + 1;
+ }
+ @vindextype@ vindex1 = @vindexload@((@vindextype@*)index_ip);
+ @vindextype@ vindex2 = @vindexload@((@vindextype@*)(index_ip+@num_lanes@));
+
+ @mask@ load_mask1 = avx512_get_full_load_mask_@vsuffix@();
+ @mask@ load_mask2 = avx512_get_full_load_mask_@vsuffix@();
+ @mask@ store_mask = avx512_get_full_load_mask_@vsuffix@();
+ @vtype@ zeros = _mm512_setzero_@vsuffix@();
+
+#if @IS_FLOAT@
+ __m512i re_index = _mm512_set_epi32(30,28,26,24,22,20,18,16,14,12,10,8,6,4,2,0);
+ __m512i im_index = _mm512_set_epi32(31,29,27,25,23,21,19,17,15,13,11,9,7,5,3,1);
+#else
+ __m512i re_index = _mm512_set_epi64(14,12,10,8,6,4,2,0);
+ __m512i im_index = _mm512_set_epi64(15,13,11,9,7,5,3,1);
+#endif
+
+ while (num_remaining_elements > 0) {
+ if (num_remaining_elements < @num_lanes@) {
+ load_mask1 = avx512_get_partial_load_mask_@vsuffix@(
+ num_remaining_elements, @num_lanes@);
+ load_mask2 = 0x0000;
+ store_mask = avx512_get_partial_load_mask_@vsuffix@(
+ num_remaining_elements/2, @num_lanes@);
+ } else if (num_remaining_elements < 2*@num_lanes@) {
+ load_mask1 = avx512_get_full_load_mask_@vsuffix@();
+ load_mask2 = avx512_get_partial_load_mask_@vsuffix@(
+ num_remaining_elements - @num_lanes@, @num_lanes@);
+ store_mask = avx512_get_partial_load_mask_@vsuffix@(
+ num_remaining_elements/2, @num_lanes@);
+ }
+ @vtype@ x1, x2;
+ if (stride_ip1 == 1) {
+ x1 = avx512_masked_load_@vsuffix@(load_mask1, ip);
+ x2 = avx512_masked_load_@vsuffix@(load_mask2, ip+@num_lanes@);
+ }
+ else {
+ x1 = avx512_masked_gather_@vsuffix@(zeros, ip, vindex1, load_mask1);
+ x2 = avx512_masked_gather_@vsuffix@(zeros, ip, vindex2, load_mask2);
+ }
+
+ @vtype@ out = avx512_cabsolute_@vsuffix@(x1, x2, re_index, im_index);
+
+ _mm512_mask_storeu_@vsuffix@(op, store_mask, out);
+ op += @num_lanes@;
+ ip += 2*@num_lanes@*stride_ip1;
+ num_remaining_elements -= 2*@num_lanes@;
+ }
+ npy_clear_floatstatus_barrier((char*)op);
+}
+
+#endif
+/**end repeat**/
+
/*
*****************************************************************************
** BOOL LOOPS
diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c
index e4ad3dc84..f34fbaf7f 100644
--- a/numpy/core/src/umath/ufunc_object.c
+++ b/numpy/core/src/umath/ufunc_object.c
@@ -324,7 +324,7 @@ _find_array_prepare(ufunc_full_args args,
NPY_NO_EXPORT int
set_matmul_flags(PyObject *d)
{
- PyObject *matmul = PyDict_GetItemString(d, "matmul");
+ PyObject *matmul = _PyDict_GetItemStringWithError(d, "matmul");
if (matmul == NULL) {
return -1;
}
@@ -397,7 +397,7 @@ _ufunc_setup_flags(PyUFuncObject *ufunc, npy_uint32 op_in_flags,
* A NULL is placed in output_wrap for outputs that
* should just have PyArray_Return called.
*/
-static void
+static int
_find_array_wrap(ufunc_full_args args, PyObject *kwds,
PyObject **output_wrap, int nin, int nout)
{
@@ -409,9 +409,12 @@ _find_array_wrap(ufunc_full_args args, PyObject *kwds,
* If a 'subok' parameter is passed and isn't True, don't wrap but put None
* into slots with out arguments which means return the out argument
*/
- if (kwds != NULL && (obj = PyDict_GetItem(kwds,
- npy_um_str_subok)) != NULL) {
- if (obj != Py_True) {
+ if (kwds != NULL) {
+ obj = PyDict_GetItemWithError(kwds, npy_um_str_subok);
+ if (obj == NULL && PyErr_Occurred()) {
+ return -1;
+ }
+ else if (obj != NULL && obj != Py_True) {
/* skip search for wrap members */
goto handle_out;
}
@@ -450,7 +453,7 @@ handle_out:
}
Py_XDECREF(wrap);
- return;
+ return 0;
}
@@ -929,22 +932,9 @@ parse_ufunc_keywords(PyUFuncObject *ufunc, PyObject *kwds, PyObject **kwnames, .
}
}
else {
-#if PY_VERSION_HEX >= 0x03000000
PyErr_Format(PyExc_TypeError,
"'%S' is an invalid keyword to ufunc '%s'",
key, ufunc_get_name_cstr(ufunc));
-#else
- char *str = PyString_AsString(key);
- if (str == NULL) {
- PyErr_Clear();
- PyErr_SetString(PyExc_TypeError, "invalid keyword argument");
- }
- else {
- PyErr_Format(PyExc_TypeError,
- "'%s' is an invalid keyword to ufunc '%s'",
- str, ufunc_get_name_cstr(ufunc));
- }
-#endif
return -1;
}
}
@@ -1044,7 +1034,7 @@ get_ufunc_arguments(PyUFuncObject *ufunc,
int nin = ufunc->nin;
int nout = ufunc->nout;
int nop = ufunc->nargs;
- PyObject *obj, *context;
+ PyObject *obj;
PyArray_Descr *dtype = NULL;
/*
* Initialize output objects so caller knows when outputs and optional
@@ -1081,22 +1071,8 @@ get_ufunc_arguments(PyUFuncObject *ufunc,
out_op[i] = (PyArrayObject *)PyArray_FromArray(obj_a, NULL, 0);
}
else {
- if (!PyArray_IsScalar(obj, Generic)) {
- /*
- * TODO: There should be a comment here explaining what
- * context does.
- */
- context = Py_BuildValue("OOi", ufunc, args, i);
- if (context == NULL) {
- goto fail;
- }
- }
- else {
- context = NULL;
- }
out_op[i] = (PyArrayObject *)PyArray_FromAny(obj,
- NULL, 0, 0, 0, context);
- Py_XDECREF(context);
+ NULL, 0, 0, 0, NULL);
}
if (out_op[i] == NULL) {
@@ -1941,7 +1917,15 @@ make_full_arg_tuple(
}
/* Look for output keyword arguments */
- out_kwd = kwds ? PyDict_GetItem(kwds, npy_um_str_out) : NULL;
+ if (kwds) {
+ out_kwd = PyDict_GetItemWithError(kwds, npy_um_str_out);
+ if (out_kwd == NULL && PyErr_Occurred()) {
+ goto fail;
+ }
+ }
+ else {
+ out_kwd = NULL;
+ }
if (out_kwd != NULL) {
assert(nargs == nin);
@@ -3068,17 +3052,15 @@ fail:
return retval;
}
-/*UFUNC_API
- *
+/*
* This generic function is called with the ufunc object, the arguments to it,
* and an array of (pointers to) PyArrayObjects which are NULL.
*
* 'op' is an array of at least NPY_MAXARGS PyArrayObject *.
*/
-NPY_NO_EXPORT int
-PyUFunc_GenericFunction(PyUFuncObject *ufunc,
- PyObject *args, PyObject *kwds,
- PyArrayObject **op)
+static int
+PyUFunc_GenericFunction_int(PyUFuncObject *ufunc,
+ PyObject *args, PyObject *kwds, PyArrayObject **op)
{
int nin, nout;
int i, nop;
@@ -3284,6 +3266,27 @@ fail:
return retval;
}
+
+/*UFUNC_API*/
+NPY_NO_EXPORT int
+PyUFunc_GenericFunction(PyUFuncObject *ufunc,
+ PyObject *args, PyObject *kwds, PyArrayObject **op)
+{
+ /* NumPy 1.19, 2020-01-24 */
+ if (DEPRECATE(
+ "PyUFunc_GenericFunction() C-API function is deprecated "
+ "and expected to be removed rapidly. If you are using it (i.e. see "
+ "this warning/error), please notify the NumPy developers. "
+ "As of now it is expected that any use case is served better by "
+ "the direct use of `PyObject_Call(ufunc, args, kwargs)`. "
+ "PyUFunc_GenericFunction function has slightly different "
+ "untested behaviour.") < 0) {
+ return -1;
+ }
+ return PyUFunc_GenericFunction_int(ufunc, args, kwds, op);
+}
+
+
/*
* Given the output type, finds the specified binary op. The
* ufunc must have nin==2 and nout==1. The function may modify
@@ -3309,9 +3312,12 @@ get_binary_op_function(PyUFuncObject *ufunc, int *otype,
if (key == NULL) {
return -1;
}
- obj = PyDict_GetItem(ufunc->userloops, key);
+ obj = PyDict_GetItemWithError(ufunc->userloops, key);
Py_DECREF(key);
- if (obj != NULL) {
+ if (obj == NULL && PyErr_Occurred()) {
+ return -1;
+ }
+ else if (obj != NULL) {
funcdata = (PyUFunc_Loop1d *)NpyCapsule_AsVoidPtr(obj);
while (funcdata != NULL) {
int *types = funcdata->arg_types;
@@ -3437,8 +3443,8 @@ reduce_type_resolver(PyUFuncObject *ufunc, PyArrayObject *arr,
}
static int
-reduce_loop(NpyIter *iter, char **dataptrs, npy_intp *strides,
- npy_intp *countptr, NpyIter_IterNextFunc *iternext,
+reduce_loop(NpyIter *iter, char **dataptrs, npy_intp const *strides,
+ npy_intp const *countptr, NpyIter_IterNextFunc *iternext,
int needs_api, npy_intp skip_first_count, void *data)
{
PyArray_Descr *dtypes[3], **iter_dtypes;
@@ -3870,8 +3876,6 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out,
stride_copy[1] = stride1;
stride_copy[2] = stride0;
- needs_api = NpyIter_IterationNeedsAPI(iter);
-
NPY_BEGIN_THREADS_NDITER(iter);
do {
@@ -4058,8 +4062,8 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind,
for (i = 0; i < ind_size; ++i) {
if (reduceat_ind[i] < 0 || reduceat_ind[i] >= red_axis_size) {
PyErr_Format(PyExc_IndexError,
- "index %d out-of-bounds in %s.%s [0, %d)",
- (int)reduceat_ind[i], ufunc_name, opname, (int)red_axis_size);
+ "index %" NPY_INTP_FMT " out-of-bounds in %s.%s [0, %" NPY_INTP_FMT ")",
+ reduceat_ind[i], ufunc_name, opname, red_axis_size);
return NULL;
}
}
@@ -4396,7 +4400,7 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args,
PyObject *axes_in = NULL;
PyArrayObject *mp = NULL, *wheremask = NULL, *ret = NULL;
PyObject *op;
- PyObject *obj_ind, *context;
+ PyObject *obj_ind;
PyArrayObject *indices = NULL;
PyArray_Descr *otype = NULL;
PyArrayObject *out = NULL;
@@ -4435,8 +4439,11 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args,
}
/* if there is a tuple of 1 for `out` in kwds, unpack it */
if (kwds != NULL) {
- PyObject *out_obj = PyDict_GetItem(kwds, npy_um_str_out);
- if (out_obj != NULL && PyTuple_CheckExact(out_obj)) {
+ PyObject *out_obj = PyDict_GetItemWithError(kwds, npy_um_str_out);
+ if (out_obj == NULL && PyErr_Occurred()){
+ return NULL;
+ }
+ else if (out_obj != NULL && PyTuple_CheckExact(out_obj)) {
if (PyTuple_GET_SIZE(out_obj) != 1) {
PyErr_SetString(PyExc_ValueError,
"The 'out' tuple must have exactly one entry");
@@ -4487,14 +4494,7 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args,
}
}
/* Ensure input is an array */
- if (!PyArray_Check(op) && !PyArray_IsScalar(op, Generic)) {
- context = Py_BuildValue("O(O)i", ufunc, op, 0);
- }
- else {
- context = NULL;
- }
- mp = (PyArrayObject *)PyArray_FromAny(op, NULL, 0, 0, 0, context);
- Py_XDECREF(context);
+ mp = (PyArrayObject *)PyArray_FromAny(op, NULL, 0, 0, 0, NULL);
if (mp == NULL) {
goto fail;
}
@@ -4698,7 +4698,7 @@ ufunc_generic_call(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds)
return override;
}
- errval = PyUFunc_GenericFunction(ufunc, args, kwds, mps);
+ errval = PyUFunc_GenericFunction_int(ufunc, args, kwds, mps);
if (errval < 0) {
return NULL;
}
@@ -4728,7 +4728,9 @@ ufunc_generic_call(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds)
if (make_full_arg_tuple(&full_args, ufunc->nin, ufunc->nout, args, kwds) < 0) {
goto fail;
}
- _find_array_wrap(full_args, kwds, wraparr, ufunc->nin, ufunc->nout);
+ if (_find_array_wrap(full_args, kwds, wraparr, ufunc->nin, ufunc->nout) < 0) {
+ goto fail;
+ }
/* wrap outputs */
for (i = 0; i < ufunc->nout; i++) {
@@ -4790,8 +4792,11 @@ ufunc_geterr(PyObject *NPY_UNUSED(dummy), PyObject *args)
if (thedict == NULL) {
thedict = PyEval_GetBuiltins();
}
- res = PyDict_GetItem(thedict, npy_um_str_pyvals_name);
- if (res != NULL) {
+ res = PyDict_GetItemWithError(thedict, npy_um_str_pyvals_name);
+ if (res == NULL && PyErr_Occurred()) {
+ return NULL;
+ }
+ else if (res != NULL) {
Py_INCREF(res);
return res;
}
@@ -4987,6 +4992,16 @@ PyUFunc_FromFuncAndDataAndSignatureAndIdentity(PyUFuncGenericFunction *func, voi
NPY_NO_EXPORT int
PyUFunc_SetUsesArraysAsData(void **data, size_t i)
{
+ /* NumPy 1.19, 2020-01-24 */
+ if (DEPRECATE(
+ "PyUFunc_SetUsesArraysAsData() C-API function is deprecated "
+ "and expected to be removed rapidly. If you are using it (i.e. see "
+ "this warning/error), please notify the NumPy developers. "
+ "It is currently assumed that this function is simply unused and "
+ "its removal will facilitate the implementation of better "
+ "approaches.") < 0) {
+ return -1;
+ }
data[i] = (void*)PyUFunc_SetUsesArraysAsData;
return 0;
}
@@ -5068,21 +5083,12 @@ _free_loop1d_list(PyUFunc_Loop1d *data)
}
}
-#if PY_VERSION_HEX >= 0x03000000
static void
_loop1d_list_free(PyObject *ptr)
{
PyUFunc_Loop1d *data = (PyUFunc_Loop1d *)PyCapsule_GetPointer(ptr, NULL);
_free_loop1d_list(data);
}
-#else
-static void
-_loop1d_list_free(void *ptr)
-{
- PyUFunc_Loop1d *data = (PyUFunc_Loop1d *)ptr;
- _free_loop1d_list(data);
-}
-#endif
/*
@@ -5145,8 +5151,11 @@ PyUFunc_RegisterLoopForDescr(PyUFuncObject *ufunc,
function, arg_typenums, data);
if (result == 0) {
- cobj = PyDict_GetItem(ufunc->userloops, key);
- if (cobj == NULL) {
+ cobj = PyDict_GetItemWithError(ufunc->userloops, key);
+ if (cobj == NULL && PyErr_Occurred()) {
+ result = -1;
+ }
+ else if (cobj == NULL) {
PyErr_SetString(PyExc_KeyError,
"userloop for user dtype not found");
result = -1;
@@ -5250,9 +5259,12 @@ PyUFunc_RegisterLoopForType(PyUFuncObject *ufunc,
funcdata->nargs = 0;
/* Get entry for this user-defined type*/
- cobj = PyDict_GetItem(ufunc->userloops, key);
+ cobj = PyDict_GetItemWithError(ufunc->userloops, key);
+ if (cobj == NULL && PyErr_Occurred()) {
+ return 0;
+ }
/* If it's not there, then make one and return. */
- if (cobj == NULL) {
+ else if (cobj == NULL) {
cobj = NpyCapsule_FromVoidPtr((void *)funcdata, _loop1d_list_free);
if (cobj == NULL) {
goto fail;
@@ -6056,63 +6068,17 @@ static PyGetSetDef ufunc_getset[] = {
*****************************************************************************/
NPY_NO_EXPORT PyTypeObject PyUFunc_Type = {
-#if defined(NPY_PY3K)
PyVarObject_HEAD_INIT(NULL, 0)
-#else
- PyObject_HEAD_INIT(NULL)
- 0, /* ob_size */
-#endif
- "numpy.ufunc", /* tp_name */
- sizeof(PyUFuncObject), /* tp_basicsize */
- 0, /* tp_itemsize */
- /* methods */
- (destructor)ufunc_dealloc, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
-#if defined(NPY_PY3K)
- 0, /* tp_reserved */
-#else
- 0, /* tp_compare */
-#endif
- (reprfunc)ufunc_repr, /* tp_repr */
- 0, /* tp_as_number */
- 0, /* tp_as_sequence */
- 0, /* tp_as_mapping */
- 0, /* tp_hash */
- (ternaryfunc)ufunc_generic_call, /* tp_call */
- (reprfunc)ufunc_repr, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
- Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, /* tp_flags */
- 0, /* tp_doc */
- (traverseproc)ufunc_traverse, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
- ufunc_methods, /* tp_methods */
- 0, /* tp_members */
- ufunc_getset, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- 0, /* tp_init */
- 0, /* tp_alloc */
- 0, /* tp_new */
- 0, /* tp_free */
- 0, /* tp_is_gc */
- 0, /* tp_bases */
- 0, /* tp_mro */
- 0, /* tp_cache */
- 0, /* tp_subclasses */
- 0, /* tp_weaklist */
- 0, /* tp_del */
- 0, /* tp_version_tag */
+ .tp_name = "numpy.ufunc",
+ .tp_basicsize = sizeof(PyUFuncObject),
+ .tp_dealloc = (destructor)ufunc_dealloc,
+ .tp_repr = (reprfunc)ufunc_repr,
+ .tp_call = (ternaryfunc)ufunc_generic_call,
+ .tp_str = (reprfunc)ufunc_repr,
+ .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,
+ .tp_traverse = (traverseproc)ufunc_traverse,
+ .tp_methods = ufunc_methods,
+ .tp_getset = ufunc_getset,
};
/* End of code for ufunc objects */
diff --git a/numpy/core/src/umath/ufunc_type_resolution.c b/numpy/core/src/umath/ufunc_type_resolution.c
index 9be7b63a0..2534ff78a 100644
--- a/numpy/core/src/umath/ufunc_type_resolution.c
+++ b/numpy/core/src/umath/ufunc_type_resolution.c
@@ -883,7 +883,7 @@ PyUFunc_SubtractionTypeResolver(PyUFuncObject *ufunc,
/* The type resolver would have upcast already */
if (out_dtypes[0]->type_num == NPY_BOOL) {
PyErr_Format(PyExc_TypeError,
- "numpy boolean subtract, the `-` operator, is deprecated, "
+ "numpy boolean subtract, the `-` operator, is not supported, "
"use the bitwise_xor, the `^` operator, or the logical_xor "
"function instead.");
return -1;
@@ -1347,37 +1347,6 @@ PyUFunc_TrueDivisionTypeResolver(PyUFuncObject *ufunc,
return PyUFunc_DivisionTypeResolver(ufunc, casting, operands,
type_tup, out_dtypes);
}
-/*
- * Function to check and report floor division warning when python2.x is
- * invoked with -3 switch
- * See PEP238 and #7949 for numpy
- * This function will not be hit for py3 or when __future__ imports division.
- * See generate_umath.py for reason
-*/
-NPY_NO_EXPORT int
-PyUFunc_MixedDivisionTypeResolver(PyUFuncObject *ufunc,
- NPY_CASTING casting,
- PyArrayObject **operands,
- PyObject *type_tup,
- PyArray_Descr **out_dtypes)
-{
- /* Deprecation checks needed only on python 2 */
-#if !defined(NPY_PY3K)
- int type_num1, type_num2;
-
- type_num1 = PyArray_DESCR(operands[0])->type_num;
- type_num2 = PyArray_DESCR(operands[1])->type_num;
-
- /* If both types are integer, warn the user, same as python does */
- if (Py_DivisionWarningFlag &&
- (PyTypeNum_ISINTEGER(type_num1) || PyTypeNum_ISBOOL(type_num1)) &&
- (PyTypeNum_ISINTEGER(type_num2) || PyTypeNum_ISBOOL(type_num2))) {
- PyErr_Warn(PyExc_DeprecationWarning, "numpy: classic int division");
- }
-#endif
- return PyUFunc_DivisionTypeResolver(ufunc, casting, operands,
- type_tup, out_dtypes);
-}
static int
find_userloop(PyUFuncObject *ufunc,
@@ -1410,9 +1379,12 @@ find_userloop(PyUFuncObject *ufunc,
if (key == NULL) {
return -1;
}
- obj = PyDict_GetItem(ufunc->userloops, key);
+ obj = PyDict_GetItemWithError(ufunc->userloops, key);
Py_DECREF(key);
- if (obj == NULL) {
+ if (obj == NULL && PyErr_Occurred()){
+ return -1;
+ }
+ else if (obj == NULL) {
continue;
}
for (funcdata = (PyUFunc_Loop1d *)NpyCapsule_AsVoidPtr(obj);
@@ -1815,9 +1787,12 @@ linear_search_userloop_type_resolver(PyUFuncObject *self,
if (key == NULL) {
return -1;
}
- obj = PyDict_GetItem(self->userloops, key);
+ obj = PyDict_GetItemWithError(self->userloops, key);
Py_DECREF(key);
- if (obj == NULL) {
+ if (obj == NULL && PyErr_Occurred()){
+ return -1;
+ }
+ else if (obj == NULL) {
continue;
}
for (funcdata = (PyUFunc_Loop1d *)NpyCapsule_AsVoidPtr(obj);
@@ -1879,9 +1854,12 @@ type_tuple_userloop_type_resolver(PyUFuncObject *self,
if (key == NULL) {
return -1;
}
- obj = PyDict_GetItem(self->userloops, key);
+ obj = PyDict_GetItemWithError(self->userloops, key);
Py_DECREF(key);
- if (obj == NULL) {
+ if (obj == NULL && PyErr_Occurred()){
+ return -1;
+ }
+ else if (obj == NULL) {
continue;
}
diff --git a/numpy/core/src/umath/ufunc_type_resolution.h b/numpy/core/src/umath/ufunc_type_resolution.h
index a4e670a8e..1d6ad3358 100644
--- a/numpy/core/src/umath/ufunc_type_resolution.h
+++ b/numpy/core/src/umath/ufunc_type_resolution.h
@@ -72,13 +72,6 @@ PyUFunc_MultiplicationTypeResolver(PyUFuncObject *ufunc,
PyArray_Descr **out_dtypes);
NPY_NO_EXPORT int
-PyUFunc_MixedDivisionTypeResolver(PyUFuncObject *ufunc,
- NPY_CASTING casting,
- PyArrayObject **operands,
- PyObject *type_tup,
- PyArray_Descr **out_dtypes);
-
-NPY_NO_EXPORT int
PyUFunc_TrueDivisionTypeResolver(PyUFuncObject *ufunc,
NPY_CASTING casting,
PyArrayObject **operands,
diff --git a/numpy/core/src/umath/umathmodule.c b/numpy/core/src/umath/umathmodule.c
index 6ec474376..bad42d657 100644
--- a/numpy/core/src/umath/umathmodule.c
+++ b/numpy/core/src/umath/umathmodule.c
@@ -70,9 +70,7 @@ object_ufunc_loop_selector(PyUFuncObject *ufunc,
}
PyObject *
-ufunc_frompyfunc(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *NPY_UNUSED(kwds)) {
- /* Keywords are ignored for now */
-
+ufunc_frompyfunc(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) {
PyObject *function, *pyname = NULL;
int nin, nout, i, nargs;
PyUFunc_PyFuncData *fdata;
@@ -81,14 +79,18 @@ ufunc_frompyfunc(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *NPY_UNUS
Py_ssize_t fname_len = -1;
void * ptr, **data;
int offset[2];
+ PyObject *identity = NULL; /* note: not the same semantics as Py_None */
+ static char *kwlist[] = {"", "nin", "nout", "identity", NULL};
- if (!PyArg_ParseTuple(args, "Oii:frompyfunc", &function, &nin, &nout)) {
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "Oii|$O:frompyfunc", kwlist,
+ &function, &nin, &nout, &identity)) {
return NULL;
}
if (!PyCallable_Check(function)) {
PyErr_SetString(PyExc_TypeError, "function must be callable");
return NULL;
}
+
nargs = nin + nout;
pyname = PyObject_GetAttrString(function, "__name__");
@@ -146,10 +148,10 @@ ufunc_frompyfunc(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *NPY_UNUS
/* Do a better job someday */
doc = "dynamic ufunc based on a python function";
- self = (PyUFuncObject *)PyUFunc_FromFuncAndData(
+ self = (PyUFuncObject *)PyUFunc_FromFuncAndDataAndSignatureAndIdentity(
(PyUFuncGenericFunction *)pyfunc_functions, data,
- types, /* ntypes */ 1, nin, nout, PyUFunc_None,
- str, doc, /* unused */ 0);
+ types, /* ntypes */ 1, nin, nout, identity ? PyUFunc_IdentityValue : PyUFunc_None,
+ str, doc, /* unused */ 0, NULL, identity);
if (self == NULL) {
PyArray_free(ptr);
@@ -174,7 +176,6 @@ add_newdoc_ufunc(PyObject *NPY_UNUSED(dummy), PyObject *args)
PyObject *str, *tmp;
char *docstr, *newdocstr;
-#if defined(NPY_PY3K)
if (!PyArg_ParseTuple(args, "O!O!:_add_newdoc_ufunc", &PyUFunc_Type, &ufunc,
&PyUnicode_Type, &str)) {
return NULL;
@@ -184,20 +185,11 @@ add_newdoc_ufunc(PyObject *NPY_UNUSED(dummy), PyObject *args)
return NULL;
}
docstr = PyBytes_AS_STRING(tmp);
-#else
- if (!PyArg_ParseTuple(args, "O!O!:_add_newdoc_ufunc", &PyUFunc_Type, &ufunc,
- &PyString_Type, &str)) {
- return NULL;
- }
- docstr = PyString_AS_STRING(str);
-#endif
if (NULL != ufunc->doc) {
PyErr_SetString(PyExc_ValueError,
"Cannot change docstring of ufunc with non-NULL docstring");
-#if defined(NPY_PY3K)
Py_DECREF(tmp);
-#endif
return NULL;
}
@@ -211,9 +203,7 @@ add_newdoc_ufunc(PyObject *NPY_UNUSED(dummy), PyObject *args)
strcpy(newdocstr, docstr);
ufunc->doc = newdocstr;
-#if defined(NPY_PY3K)
Py_DECREF(tmp);
-#endif
Py_RETURN_NONE;
}
@@ -324,10 +314,8 @@ int initumath(PyObject *m)
PyModule_AddObject(m, "NZERO", PyFloat_FromDouble(NPY_NZERO));
PyModule_AddObject(m, "NAN", PyFloat_FromDouble(NPY_NAN));
-#if defined(NPY_PY3K)
s = PyDict_GetItemString(d, "true_divide");
PyDict_SetItemString(d, "divide", s);
-#endif
s = PyDict_GetItemString(d, "conjugate");
s2 = PyDict_GetItemString(d, "remainder");
diff --git a/numpy/core/tests/_locales.py b/numpy/core/tests/_locales.py
index 52e4ff36d..ce7b81f00 100644
--- a/numpy/core/tests/_locales.py
+++ b/numpy/core/tests/_locales.py
@@ -1,8 +1,6 @@
"""Provide class for testing in French locale
"""
-from __future__ import division, absolute_import, print_function
-
import sys
import locale
@@ -45,7 +43,7 @@ def find_comma_decimal_point_locale():
return old_locale, new_locale
-class CommaDecimalPointLocale(object):
+class CommaDecimalPointLocale:
"""Sets LC_NUMERIC to a locale with comma as decimal point.
Classes derived from this class have setup and teardown methods that run
diff --git a/numpy/core/tests/data/umath-validation-set-cos b/numpy/core/tests/data/umath-validation-set-cos
index 360ebcd6a..2e75f044c 100644
--- a/numpy/core/tests/data/umath-validation-set-cos
+++ b/numpy/core/tests/data/umath-validation-set-cos
@@ -19,9 +19,7 @@ np.float32,0x80000001,0x3f800000,2
np.float32,0x00000000,0x3f800000,2
np.float32,0x80000000,0x3f800000,2
np.float32,0x00800000,0x3f800000,2
-np.float32,0x7f7fffff,0x3f5a5f96,2
np.float32,0x80800000,0x3f800000,2
-np.float32,0xff7fffff,0x3f5a5f96,2
## 1.00f + 0x00000001 ##
np.float32,0x3f800000,0x3f0a5140,2
np.float32,0x3f800001,0x3f0a513f,2
@@ -36,26 +34,6 @@ np.float32,0x41d92388,0xbed987c7,2
np.float32,0x422dd66c,0x3f5dcab3,2
np.float32,0xc28f5be6,0xbf5688d8,2
np.float32,0x41ab2674,0xbf53aa3b,2
-np.float32,0xd0102756,0x3f45d12d,2
-np.float32,0xcf99405e,0xbe9cf281,2
-np.float32,0xcfd83a12,0x3eaae4ca,2
-np.float32,0x4fb54db0,0xbf7b2894,2
-np.float32,0xcfcca29d,0x3f752e4e,2
-np.float32,0xceec2ac0,0xbf745303,2
-np.float32,0xcfdca97f,0x3ef554a7,2
-np.float32,0xcfe92b0a,0x3f4618f2,2
-np.float32,0x5014b0eb,0x3ee933e6,2
-np.float32,0xcfa7ee96,0xbeedeeb2,2
-np.float32,0x754c09a0,0xbef298de,2
-np.float32,0x77a731fb,0x3f24599f,2
-np.float32,0x76de2494,0x3f79576c,2
-np.float32,0xf74920dc,0xbf4d196e,2
-np.float32,0x7707a312,0xbeb5cb8e,2
-np.float32,0x75bf9790,0xbf7fd7fe,2
-np.float32,0xf4ca7c40,0xbe15107d,2
-np.float32,0x77e91899,0xbe8a968b,2
-np.float32,0xf74c9820,0xbf7f9677,2
-np.float32,0x7785ca29,0xbe6ef93b,2
np.float32,0x3f490fdb,0x3f3504f3,2
np.float32,0xbf490fdb,0x3f3504f3,2
np.float32,0x3fc90fdb,0xb33bbd2e,2
@@ -660,26 +638,6 @@ np.float32,0x4350ea79,0x3631dadb,2
np.float32,0x42dbe957,0xbf800000,2
np.float32,0x425be957,0xb505522a,2
np.float32,0x435be957,0x3f800000,2
-np.float32,0x487fe5ab,0xba140185,2
-np.float32,0x497fe5ab,0x3f7fffd5,2
-np.float32,0x49ffe5ab,0x3f7fff55,2
-np.float32,0x49ffeb37,0x3b9382f5,2
-np.float32,0x497ff0c3,0x3b13049f,2
-np.float32,0x49fff0c3,0xbf7fff57,2
-np.float32,0x49fff64f,0xbb928618,2
-np.float32,0x497ffbdb,0xbf7fffd6,2
-np.float32,0x49fffbdb,0x3f7fff59,2
-np.float32,0x48fffbdb,0xba9207c6,2
-np.float32,0x4e736e56,0xbf800000,2
-np.float32,0x4d4da377,0xbf800000,2
-np.float32,0x4ece58c3,0xbf800000,2
-np.float32,0x4ee0db9c,0xbf800000,2
-np.float32,0x4dee7002,0x3f800000,2
-np.float32,0x4ee86afc,0x38857a23,2
-np.float32,0x4dca4f3f,0xbf800000,2
-np.float32,0x4ecb48af,0xb95d1e10,2
-np.float32,0x4e51e33f,0xbf800000,2
-np.float32,0x4ef5f421,0xbf800000,2
np.float32,0x46027eb2,0x3e7d94c9,2
np.float32,0x4477baed,0xbe7f1824,2
np.float32,0x454b8024,0x3e7f5268,2
diff --git a/numpy/core/tests/data/umath-validation-set-sin b/numpy/core/tests/data/umath-validation-set-sin
index a56273195..64e78ae15 100644
--- a/numpy/core/tests/data/umath-validation-set-sin
+++ b/numpy/core/tests/data/umath-validation-set-sin
@@ -19,9 +19,7 @@ np.float32,0x80000001,0x80000001,2
np.float32,0x00000000,0x00000000,2
np.float32,0x80000000,0x80000000,2
np.float32,0x00800000,0x00800000,2
-np.float32,0x7f7fffff,0xbf0599b3,2
np.float32,0x80800000,0x80800000,2
-np.float32,0xff7fffff,0x3f0599b3,2
## 1.00f ##
np.float32,0x3f800000,0x3f576aa4,2
np.float32,0x3f800001,0x3f576aa6,2
@@ -36,26 +34,6 @@ np.float32,0x41d92388,0x3f67beef,2
np.float32,0x422dd66c,0xbeffb0c1,2
np.float32,0xc28f5be6,0xbf0bae79,2
np.float32,0x41ab2674,0x3f0ffe2b,2
-np.float32,0xd0102756,0x3f227e8a,2
-np.float32,0xcf99405e,0x3f73ad00,2
-np.float32,0xcfd83a12,0xbf7151a7,2
-np.float32,0x4fb54db0,0xbe46354b,2
-np.float32,0xcfcca29d,0xbe9345e6,2
-np.float32,0xceec2ac0,0x3e98dc89,2
-np.float32,0xcfdca97f,0xbf60b2b4,2
-np.float32,0xcfe92b0a,0xbf222705,2
-np.float32,0x5014b0eb,0x3f63e75c,2
-np.float32,0xcfa7ee96,0x3f62ada4,2
-np.float32,0x754c09a0,0xbf617056,2
-np.float32,0x77a731fb,0x3f44472b,2
-np.float32,0x76de2494,0xbe680739,2
-np.float32,0xf74920dc,0xbf193338,2
-np.float32,0x7707a312,0xbf6f51b1,2
-np.float32,0x75bf9790,0xbd0f1a47,2
-np.float32,0xf4ca7c40,0xbf7d45e7,2
-np.float32,0x77e91899,0x3f767181,2
-np.float32,0xf74c9820,0xbd685b75,2
-np.float32,0x7785ca29,0x3f78ee61,2
np.float32,0x3f490fdb,0x3f3504f3,2
np.float32,0xbf490fdb,0xbf3504f3,2
np.float32,0x3fc90fdb,0x3f800000,2
@@ -660,46 +638,21 @@ np.float32,0x4350ea79,0x3f800000,2
np.float32,0x42dbe957,0x3585522a,2
np.float32,0x425be957,0xbf800000,2
np.float32,0x435be957,0xb605522a,2
-np.float32,0x487fe5ab,0xbf7ffffd,2
-np.float32,0x497fe5ab,0xbb14017d,2
-np.float32,0x49ffe5ab,0xbb940164,2
-np.float32,0x49ffeb37,0x3f7fff56,2
-np.float32,0x497ff0c3,0x3f7fffd6,2
-np.float32,0x49fff0c3,0x3b930487,2
-np.float32,0x49fff64f,0xbf7fff58,2
-np.float32,0x497ffbdb,0x3b1207c0,2
-np.float32,0x49fffbdb,0xbb9207a9,2
-np.float32,0x48fffbdb,0xbf7ffff6,2
-np.float32,0x4e736e56,0x397fa7f2,2
-np.float32,0x4d4da377,0xb57c64bc,2
-np.float32,0x4ece58c3,0xb80846c8,2
-np.float32,0x4ee0db9c,0x394c4786,2
-np.float32,0x4dee7002,0x381bce96,2
-np.float32,0x4ee86afc,0x3f800000,2
-np.float32,0x4dca4f3f,0xb8e25111,2
-np.float32,0x4ecb48af,0xbf800000,2
-np.float32,0x4e51e33f,0xb8a4fa6f,2
-np.float32,0x4ef5f421,0x387ca7df,2
np.float32,0x476362a2,0xbd7ff911,2
np.float32,0x464c99a4,0x3e7f4d41,2
np.float32,0x4471f73d,0x3e7fe1b0,2
np.float32,0x445a6752,0x3e7ef367,2
np.float32,0x474fa400,0x3e7f9fcd,2
-np.float32,0x47c9e70e,0xbb4bba09,2
np.float32,0x45c1e72f,0xbe7fc7af,2
np.float32,0x4558c91d,0x3e7e9f31,2
np.float32,0x43784f94,0xbdff6654,2
np.float32,0x466e8500,0xbe7ea0a3,2
np.float32,0x468e1c25,0x3e7e22fb,2
-np.float32,0x47d28adc,0xbe7d5e6b,2
np.float32,0x44ea6cfc,0x3dff70c3,2
np.float32,0x4605126c,0x3e7f89ef,2
np.float32,0x4788b3c6,0xbb87d853,2
np.float32,0x4531b042,0x3dffd163,2
-np.float32,0x47e46c29,0xbe7def2b,2
-np.float32,0x47c10e07,0xbdff63d4,2
np.float32,0x43f1f71d,0x3dfff387,2
-np.float32,0x47c3e38c,0x3e7f0b2f,2
np.float32,0x462c3fa5,0xbd7fe13d,2
np.float32,0x441c5354,0xbdff76b4,2
np.float32,0x44908b69,0x3e7dcf0d,2
diff --git a/numpy/core/tests/test_abc.py b/numpy/core/tests/test_abc.py
index d9c61b0c6..30e5748af 100644
--- a/numpy/core/tests/test_abc.py
+++ b/numpy/core/tests/test_abc.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
from numpy.testing import assert_
import numbers
@@ -7,7 +5,7 @@ import numbers
import numpy as np
from numpy.core.numerictypes import sctypes
-class TestABC(object):
+class TestABC:
def test_abstract(self):
assert_(issubclass(np.number, numbers.Number))
diff --git a/numpy/core/tests/test_api.py b/numpy/core/tests/test_api.py
index 32e2ea537..71b46e551 100644
--- a/numpy/core/tests/test_api.py
+++ b/numpy/core/tests/test_api.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import sys
import numpy as np
@@ -41,57 +39,38 @@ def test_array_array():
assert_equal(old_refcount, sys.getrefcount(np.float64))
# test string
- S2 = np.dtype((str, 2))
- S3 = np.dtype((str, 3))
- S5 = np.dtype((str, 5))
+ S2 = np.dtype((bytes, 2))
+ S3 = np.dtype((bytes, 3))
+ S5 = np.dtype((bytes, 5))
+ assert_equal(np.array(b"1.0", dtype=np.float64),
+ np.ones((), dtype=np.float64))
+ assert_equal(np.array(b"1.0").dtype, S3)
+ assert_equal(np.array(b"1.0", dtype=bytes).dtype, S3)
+ assert_equal(np.array(b"1.0", dtype=S2), np.array(b"1."))
+ assert_equal(np.array(b"1", dtype=S5), np.ones((), dtype=S5))
+
+ # test string
+ U2 = np.dtype((str, 2))
+ U3 = np.dtype((str, 3))
+ U5 = np.dtype((str, 5))
assert_equal(np.array("1.0", dtype=np.float64),
np.ones((), dtype=np.float64))
- assert_equal(np.array("1.0").dtype, S3)
- assert_equal(np.array("1.0", dtype=str).dtype, S3)
- assert_equal(np.array("1.0", dtype=S2), np.array("1."))
- assert_equal(np.array("1", dtype=S5), np.ones((), dtype=S5))
-
- # test unicode
- _unicode = globals().get("unicode")
- if _unicode:
- U2 = np.dtype((_unicode, 2))
- U3 = np.dtype((_unicode, 3))
- U5 = np.dtype((_unicode, 5))
- assert_equal(np.array(_unicode("1.0"), dtype=np.float64),
- np.ones((), dtype=np.float64))
- assert_equal(np.array(_unicode("1.0")).dtype, U3)
- assert_equal(np.array(_unicode("1.0"), dtype=_unicode).dtype, U3)
- assert_equal(np.array(_unicode("1.0"), dtype=U2),
- np.array(_unicode("1.")))
- assert_equal(np.array(_unicode("1"), dtype=U5),
- np.ones((), dtype=U5))
+ assert_equal(np.array("1.0").dtype, U3)
+ assert_equal(np.array("1.0", dtype=str).dtype, U3)
+ assert_equal(np.array("1.0", dtype=U2), np.array(str("1.")))
+ assert_equal(np.array("1", dtype=U5), np.ones((), dtype=U5))
builtins = getattr(__builtins__, '__dict__', __builtins__)
assert_(hasattr(builtins, 'get'))
- # test buffer
- _buffer = builtins.get("buffer")
- if _buffer and sys.version_info[:3] >= (2, 7, 5):
- # This test fails for earlier versions of Python.
- # Evidently a bug got fixed in 2.7.5.
- dat = np.array(_buffer('1.0'), dtype=np.float64)
- assert_equal(dat, [49.0, 46.0, 48.0])
- assert_(dat.dtype.type is np.float64)
-
- dat = np.array(_buffer(b'1.0'))
- assert_equal(dat, [49, 46, 48])
- assert_(dat.dtype.type is np.uint8)
-
- # test memoryview, new version of buffer
- _memoryview = builtins.get("memoryview")
- if _memoryview:
- dat = np.array(_memoryview(b'1.0'), dtype=np.float64)
- assert_equal(dat, [49.0, 46.0, 48.0])
- assert_(dat.dtype.type is np.float64)
-
- dat = np.array(_memoryview(b'1.0'))
- assert_equal(dat, [49, 46, 48])
- assert_(dat.dtype.type is np.uint8)
+ # test memoryview
+ dat = np.array(memoryview(b'1.0'), dtype=np.float64)
+ assert_equal(dat, [49.0, 46.0, 48.0])
+ assert_(dat.dtype.type is np.float64)
+
+ dat = np.array(memoryview(b'1.0'))
+ assert_equal(dat, [49, 46, 48])
+ assert_(dat.dtype.type is np.uint8)
# test array interface
a = np.array(100.0, dtype=np.float64)
@@ -296,7 +275,7 @@ def test_array_astype():
)
def test_array_astype_warning(t):
# test ComplexWarning when casting from complex to float or int
- a = np.array(10, dtype=np.complex)
+ a = np.array(10, dtype=np.complex_)
assert_warns(np.ComplexWarning, a.astype, t)
def test_copyto_fromscalar():
diff --git a/numpy/core/tests/test_arrayprint.py b/numpy/core/tests/test_arrayprint.py
index 702e68e76..e29217461 100644
--- a/numpy/core/tests/test_arrayprint.py
+++ b/numpy/core/tests/test_arrayprint.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
-from __future__ import division, absolute_import, print_function
-
import sys
import gc
+from hypothesis import given
+from hypothesis.extra import numpy as hynp
import pytest
import numpy as np
@@ -12,7 +12,7 @@ from numpy.testing import (
)
import textwrap
-class TestArrayRepr(object):
+class TestArrayRepr:
def test_nan_inf(self):
x = np.array([np.nan, np.inf])
assert_equal(repr(x), 'array([nan, inf])')
@@ -160,7 +160,7 @@ class TestArrayRepr(object):
assert_equal(repr(arr_no_fields), 'array([(), (), (), ()], dtype=[])')
-class TestComplexArray(object):
+class TestComplexArray:
def test_str(self):
rvals = [0, 1, -1, np.inf, -np.inf, np.nan]
cvals = [complex(rp, ip) for rp in rvals for ip in rvals]
@@ -207,7 +207,7 @@ class TestComplexArray(object):
for res, val in zip(actual, wanted):
assert_equal(res, val)
-class TestArray2String(object):
+class TestArray2String:
def test_basic(self):
"""Basic test of array2string."""
a = np.arange(3)
@@ -235,12 +235,8 @@ class TestArray2String(object):
return 'O'
x = np.arange(3)
- if sys.version_info[0] >= 3:
- x_hex = "[0x0 0x1 0x2]"
- x_oct = "[0o0 0o1 0o2]"
- else:
- x_hex = "[0x0L 0x1L 0x2L]"
- x_oct = "[0L 01L 02L]"
+ x_hex = "[0x0 0x1 0x2]"
+ x_oct = "[0o0 0o1 0o2]"
assert_(np.array2string(x, formatter={'all':_format_function}) ==
"[. o O]")
assert_(np.array2string(x, formatter={'int_kind':_format_function}) ==
@@ -399,6 +395,18 @@ class TestArray2String(object):
"[ 'xxxxx']"
)
+ @given(hynp.from_dtype(np.dtype("U")))
+ def test_any_text(self, text):
+ # This test checks that, given any value that can be represented in an
+ # array of dtype("U") (i.e. unicode string), ...
+ a = np.array([text, text, text])
+ # casting a list of them to an array does not e.g. truncate the value
+ assert_equal(a[0], text)
+ # and that np.array2string puts a newline in the expected location
+ expected_repr = "[{0!r} {0!r}\n {0!r}]".format(text)
+ result = np.array2string(a, max_line_width=len(repr(text)) * 2 + 3)
+ assert_equal(result, expected_repr)
+
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_refcount(self):
# make sure we do not hold references to the array due to a recursive
@@ -413,7 +421,7 @@ class TestArray2String(object):
gc.enable()
assert_(r1 == r2)
-class TestPrintOptions(object):
+class TestPrintOptions:
"""Test getting and setting global print options."""
def setup(self):
@@ -467,16 +475,10 @@ class TestPrintOptions(object):
assert_equal(repr(x), "array([0., 1., 2.])")
def test_0d_arrays(self):
- unicode = type(u'')
-
- assert_equal(unicode(np.array(u'café', '<U4')), u'café')
+ assert_equal(str(np.array(u'café', '<U4')), u'café')
- if sys.version_info[0] >= 3:
- assert_equal(repr(np.array('café', '<U4')),
- "array('café', dtype='<U4')")
- else:
- assert_equal(repr(np.array(u'café', '<U4')),
- "array(u'caf\\xe9', dtype='<U4')")
+ assert_equal(repr(np.array('café', '<U4')),
+ "array('café', dtype='<U4')")
assert_equal(str(np.array('test', np.str_)), 'test')
a = np.zeros(1, dtype=[('a', '<i4', (3,))])
@@ -709,7 +711,7 @@ class TestPrintOptions(object):
array([10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22.],
dtype=float32)"""))
- styp = '<U4' if sys.version_info[0] >= 3 else '|S4'
+ styp = '<U4'
assert_equal(repr(np.ones(3, dtype=styp)),
"array(['1', '1', '1'], dtype='{}')".format(styp))
assert_equal(repr(np.ones(12, dtype=styp)), textwrap.dedent("""\
@@ -847,18 +849,14 @@ class TestPrintOptions(object):
assert_raises(TypeError, np.set_printoptions, threshold=b'1')
def test_unicode_object_array():
- import sys
- if sys.version_info[0] >= 3:
- expected = "array(['é'], dtype=object)"
- else:
- expected = "array([u'\\xe9'], dtype=object)"
+ expected = "array(['é'], dtype=object)"
x = np.array([u'\xe9'], dtype=object)
assert_equal(repr(x), expected)
-class TestContextManager(object):
+class TestContextManager:
def test_ctx_mgr(self):
- # test that context manager actuall works
+ # test that context manager actually works
with np.printoptions(precision=2):
s = str(np.array([2.0]) / 3)
assert_equal(s, '[0.67]')
diff --git a/numpy/core/tests/test_cpu_features.py b/numpy/core/tests/test_cpu_features.py
new file mode 100644
index 000000000..3b5cb3157
--- /dev/null
+++ b/numpy/core/tests/test_cpu_features.py
@@ -0,0 +1,104 @@
+import sys, platform, re, pytest
+
+from numpy.testing import assert_equal
+from numpy.core._multiarray_umath import __cpu_features__
+
+class AbstractTest(object):
+ features = []
+ features_groups = {}
+ features_map = {}
+ features_flags = set()
+
+ def load_flags(self):
+ # a hook
+ pass
+
+ def test_features(self):
+ self.load_flags()
+ for gname, features in self.features_groups.items():
+ test_features = [self.features_map.get(f, f) in self.features_flags for f in features]
+ assert_equal(__cpu_features__.get(gname), all(test_features))
+
+ for feature_name in self.features:
+ map_name = self.features_map.get(feature_name, feature_name)
+ cpu_have = map_name in self.features_flags
+ npy_have = __cpu_features__.get(feature_name)
+ assert_equal(npy_have, cpu_have)
+
+ def load_flags_proc(self, magic_key):
+ with open('/proc/cpuinfo') as fd:
+ for line in fd:
+ if not line.startswith(magic_key):
+ continue
+ flags_value = [s.strip() for s in line.split(':', 1)]
+ if len(flags_value) == 2:
+ self.features_flags = self.features_flags.union(flags_value[1].upper().split())
+
+ def load_flags_auxv(self):
+ import subprocess
+ auxv = subprocess.check_output(['/bin/true'], env=dict(LD_SHOW_AUXV="1"))
+ for at in auxv.split(b'\n'):
+ if not at.startswith(b"AT_HWCAP"):
+ continue
+ hwcap_value = [s.strip() for s in at.split(b':', 1)]
+ if len(hwcap_value) == 2:
+ self.features_flags = self.features_flags.union(
+ hwcap_value[1].upper().decode().split()
+ )
+
+is_linux = sys.platform.startswith('linux')
+machine = platform.machine()
+is_x86 = re.match("^(amd64|x86|i386|i686)", machine, re.IGNORECASE)
+@pytest.mark.skipif(not is_linux or not is_x86, reason="Only for Linux and x86")
+class Test_X86_Features(AbstractTest):
+ features = [
+ "MMX", "SSE", "SSE2", "SSE3", "SSSE3", "SSE41", "POPCNT", "SSE42",
+ "AVX", "F16C", "XOP", "FMA4", "FMA3", "AVX2", "AVX512F", "AVX512CD",
+ "AVX512ER", "AVX512PF", "AVX5124FMAPS", "AVX5124VNNIW", "AVX512VPOPCNTDQ",
+ "AVX512VL", "AVX512BW", "AVX512DQ", "AVX512VNNI", "AVX512IFMA",
+ "AVX512VBMI", "AVX512VBMI2", "AVX512BITALG",
+ ]
+ features_groups = dict(
+ AVX512_KNL = ["AVX512F", "AVX512CD", "AVX512ER", "AVX512PF"],
+ AVX512_KNM = ["AVX512F", "AVX512CD", "AVX512ER", "AVX512PF", "AVX5124FMAPS",
+ "AVX5124VNNIW", "AVX512VPOPCNTDQ"],
+ AVX512_SKX = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL"],
+ AVX512_CLX = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512VNNI"],
+ AVX512_CNL = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512IFMA",
+ "AVX512VBMI"],
+ AVX512_ICL = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512IFMA",
+ "AVX512VBMI", "AVX512VNNI", "AVX512VBMI2", "AVX512BITALG", "AVX512VPOPCNTDQ"],
+ )
+ features_map = dict(
+ SSE3="PNI", SSE41="SSE4_1", SSE42="SSE4_2", FMA3="FMA",
+ AVX512VNNI="AVX512_VNNI", AVX512BITALG="AVX512_BITALG", AVX512VBMI2="AVX512_VBMI2",
+ AVX5124FMAPS="AVX512_4FMAPS", AVX5124VNNIW="AVX512_4VNNIW", AVX512VPOPCNTDQ="AVX512_VPOPCNTDQ",
+ )
+ def load_flags(self):
+ self.load_flags_proc("flags")
+
+is_power = re.match("^(powerpc|ppc)64", machine, re.IGNORECASE)
+@pytest.mark.skipif(not is_linux or not is_power, reason="Only for Linux and Power")
+class Test_POWER_Features(AbstractTest):
+ features = ["VSX", "VSX2", "VSX3"]
+ features_map = dict(VSX2="ARCH_2_07", VSX3="ARCH_3_00")
+
+ def load_flags(self):
+ self.load_flags_auxv()
+
+is_arm = re.match("^(arm|aarch64)", machine, re.IGNORECASE)
+@pytest.mark.skipif(not is_linux or not is_arm, reason="Only for Linux and ARM")
+class Test_ARM_Features(AbstractTest):
+ features = [
+ "NEON", "ASIMD", "FPHP", "ASIMDHP", "ASIMDDP", "ASIMDFHM"
+ ]
+ features_groups = dict(
+ NEON_FP16 = ["NEON", "HALF"],
+ NEON_VFPV4 = ["NEON", "VFPV4"],
+ )
+ def load_flags(self):
+ self.load_flags_proc("Features")
+ if re.match("^(aarch64|AARCH64)", platform.machine()):
+ self.features_map = dict(
+ NEON="ASIMD", HALF="ASIMD", VFPV4="ASIMD"
+ )
diff --git a/numpy/core/tests/test_datetime.py b/numpy/core/tests/test_datetime.py
index f99c0f72b..438d52f97 100644
--- a/numpy/core/tests/test_datetime.py
+++ b/numpy/core/tests/test_datetime.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import numpy
import numpy as np
@@ -24,7 +22,7 @@ except NameError:
RecursionError = RuntimeError # python < 3.5
-class TestDateTime(object):
+class TestDateTime:
def test_datetime_dtype_creation(self):
for unit in ['Y', 'M', 'W', 'D',
'h', 'm', 's', 'ms', 'us',
@@ -75,6 +73,15 @@ class TestDateTime(object):
# Can cast safely/same_kind from integer to timedelta
assert_(np.can_cast('i8', 'm8', casting='same_kind'))
assert_(np.can_cast('i8', 'm8', casting='safe'))
+ assert_(np.can_cast('i4', 'm8', casting='same_kind'))
+ assert_(np.can_cast('i4', 'm8', casting='safe'))
+ assert_(np.can_cast('u4', 'm8', casting='same_kind'))
+ assert_(np.can_cast('u4', 'm8', casting='safe'))
+
+ # Cannot cast safely from unsigned integer of the same size, which
+ # could overflow
+ assert_(np.can_cast('u8', 'm8', casting='same_kind'))
+ assert_(not np.can_cast('u8', 'm8', casting='safe'))
# Cannot cast safely/same_kind from float to timedelta
assert_(not np.can_cast('f4', 'm8', casting='same_kind'))
@@ -136,6 +143,50 @@ class TestDateTime(object):
assert_(np.datetime64('NaT') != np.datetime64('NaT', 'us'))
assert_(np.datetime64('NaT', 'us') != np.datetime64('NaT'))
+ @pytest.mark.parametrize("size", [
+ 3, 21, 217, 1000])
+ def test_datetime_nat_argsort_stability(self, size):
+ # NaT < NaT should be False internally for
+ # sort stability
+ expected = np.arange(size)
+ arr = np.tile(np.datetime64('NaT'), size)
+ assert_equal(np.argsort(arr, kind='mergesort'), expected)
+
+ @pytest.mark.parametrize("size", [
+ 3, 21, 217, 1000])
+ def test_timedelta_nat_argsort_stability(self, size):
+ # NaT < NaT should be False internally for
+ # sort stability
+ expected = np.arange(size)
+ arr = np.tile(np.timedelta64('NaT'), size)
+ assert_equal(np.argsort(arr, kind='mergesort'), expected)
+
+ @pytest.mark.parametrize("arr, expected", [
+ # the example provided in gh-12629
+ (['NaT', 1, 2, 3],
+ [1, 2, 3, 'NaT']),
+ # multiple NaTs
+ (['NaT', 9, 'NaT', -707],
+ [-707, 9, 'NaT', 'NaT']),
+ # this sort explores another code path for NaT
+ ([1, -2, 3, 'NaT'],
+ [-2, 1, 3, 'NaT']),
+ # 2-D array
+ ([[51, -220, 'NaT'],
+ [-17, 'NaT', -90]],
+ [[-220, 51, 'NaT'],
+ [-90, -17, 'NaT']]),
+ ])
+ @pytest.mark.parametrize("dtype", [
+ 'M8[ns]', 'M8[us]',
+ 'm8[ns]', 'm8[us]'])
+ def test_datetime_timedelta_sort_nat(self, arr, expected, dtype):
+ # fix for gh-12629 and gh-15063; NaT sorting to end of array
+ arr = np.array(arr, dtype=dtype)
+ expected = np.array(expected, dtype=dtype)
+ arr.sort()
+ assert_equal(arr, expected)
+
def test_datetime_scalar_construction(self):
# Construct with different units
assert_equal(np.datetime64('1950-03-12', 'D'),
@@ -483,6 +534,30 @@ class TestDateTime(object):
assert_equal(np.datetime64(a, '[Y]'), np.datetime64('NaT', '[Y]'))
assert_equal(np.datetime64(a, '[W]'), np.datetime64('NaT', '[W]'))
+ # NaN -> NaT
+ nan = np.array([np.nan] * 8)
+ fnan = nan.astype('f')
+ lnan = nan.astype('g')
+ cnan = nan.astype('D')
+ cfnan = nan.astype('F')
+ clnan = nan.astype('G')
+
+ nat = np.array([np.datetime64('NaT')] * 8)
+ assert_equal(nan.astype('M8[ns]'), nat)
+ assert_equal(fnan.astype('M8[ns]'), nat)
+ assert_equal(lnan.astype('M8[ns]'), nat)
+ assert_equal(cnan.astype('M8[ns]'), nat)
+ assert_equal(cfnan.astype('M8[ns]'), nat)
+ assert_equal(clnan.astype('M8[ns]'), nat)
+
+ nat = np.array([np.timedelta64('NaT')] * 8)
+ assert_equal(nan.astype('timedelta64[ns]'), nat)
+ assert_equal(fnan.astype('timedelta64[ns]'), nat)
+ assert_equal(lnan.astype('timedelta64[ns]'), nat)
+ assert_equal(cnan.astype('timedelta64[ns]'), nat)
+ assert_equal(cfnan.astype('timedelta64[ns]'), nat)
+ assert_equal(clnan.astype('timedelta64[ns]'), nat)
+
def test_days_creation(self):
assert_equal(np.array('1599', dtype='M8[D]').astype('i8'),
(1600-1970)*365 - (1972-1600)/4 + 3 - 365)
@@ -1333,10 +1408,14 @@ class TestDateTime(object):
# Interaction with NaT
a = np.array('1999-03-12T13', dtype='M8[2m]')
dtnat = np.array('NaT', dtype='M8[h]')
- assert_equal(np.minimum(a, dtnat), a)
- assert_equal(np.minimum(dtnat, a), a)
- assert_equal(np.maximum(a, dtnat), a)
- assert_equal(np.maximum(dtnat, a), a)
+ assert_equal(np.minimum(a, dtnat), dtnat)
+ assert_equal(np.minimum(dtnat, a), dtnat)
+ assert_equal(np.maximum(a, dtnat), dtnat)
+ assert_equal(np.maximum(dtnat, a), dtnat)
+ assert_equal(np.fmin(dtnat, a), a)
+ assert_equal(np.fmin(a, dtnat), a)
+ assert_equal(np.fmax(dtnat, a), a)
+ assert_equal(np.fmax(a, dtnat), a)
# Also do timedelta
a = np.array(3, dtype='m8[h]')
@@ -1831,7 +1910,7 @@ class TestDateTime(object):
def test_timedelta_arange_no_dtype(self):
d = np.array(5, dtype="m8[D]")
assert_equal(np.arange(d, d + 1), d)
- assert_raises(ValueError, np.arange, d)
+ assert_equal(np.arange(d), np.arange(0, d))
def test_datetime_maximum_reduce(self):
a = np.array(['2010-01-02', '1999-03-14', '1833-03'], dtype='M8[D]')
@@ -2208,7 +2287,7 @@ class TestDateTime(object):
continue
assert_raises(TypeError, np.isnat, np.zeros(10, t))
- def test_isfinite(self):
+ def test_isfinite_scalar(self):
assert_(not np.isfinite(np.datetime64('NaT', 'ms')))
assert_(not np.isfinite(np.datetime64('NaT', 'ns')))
assert_(np.isfinite(np.datetime64('2038-01-19T03:14:07')))
@@ -2216,18 +2295,25 @@ class TestDateTime(object):
assert_(not np.isfinite(np.timedelta64('NaT', "ms")))
assert_(np.isfinite(np.timedelta64(34, "ms")))
- res = np.array([True, True, False])
- for unit in ['Y', 'M', 'W', 'D',
- 'h', 'm', 's', 'ms', 'us',
- 'ns', 'ps', 'fs', 'as']:
- arr = np.array([123, -321, "NaT"], dtype='<datetime64[%s]' % unit)
- assert_equal(np.isfinite(arr), res)
- arr = np.array([123, -321, "NaT"], dtype='>datetime64[%s]' % unit)
- assert_equal(np.isfinite(arr), res)
- arr = np.array([123, -321, "NaT"], dtype='<timedelta64[%s]' % unit)
- assert_equal(np.isfinite(arr), res)
- arr = np.array([123, -321, "NaT"], dtype='>timedelta64[%s]' % unit)
- assert_equal(np.isfinite(arr), res)
+ @pytest.mark.parametrize('unit', ['Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms',
+ 'us', 'ns', 'ps', 'fs', 'as'])
+ @pytest.mark.parametrize('dstr', ['<datetime64[%s]', '>datetime64[%s]',
+ '<timedelta64[%s]', '>timedelta64[%s]'])
+ def test_isfinite_isinf_isnan_units(self, unit, dstr):
+ '''check isfinite, isinf, isnan for all units of <M, >M, <m, >m dtypes
+ '''
+ arr_val = [123, -321, "NaT"]
+ arr = np.array(arr_val, dtype= dstr % unit)
+ pos = np.array([True, True, False])
+ neg = np.array([False, False, True])
+ false = np.array([False, False, False])
+ assert_equal(np.isfinite(arr), pos)
+ assert_equal(np.isinf(arr), false)
+ assert_equal(np.isnan(arr), neg)
+
+ def test_assert_equal(self):
+ assert_raises(AssertionError, assert_equal,
+ np.datetime64('nat'), np.timedelta64('nat'))
def test_corecursive_input(self):
# construct a co-recursive list
@@ -2280,7 +2366,7 @@ class TestDateTime(object):
assert limit_via_str == limit
-class TestDateTimeData(object):
+class TestDateTimeData:
def test_basic(self):
a = np.array(['1980-03-23'], dtype=np.datetime64)
diff --git a/numpy/core/tests/test_defchararray.py b/numpy/core/tests/test_defchararray.py
index 7b0e6f8a4..bbb94f7d3 100644
--- a/numpy/core/tests/test_defchararray.py
+++ b/numpy/core/tests/test_defchararray.py
@@ -1,18 +1,15 @@
-from __future__ import division, absolute_import, print_function
-
-import sys
import numpy as np
from numpy.core.multiarray import _vec_string
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_raises,
- assert_raises_regex, suppress_warnings,
+ assert_raises_regex
)
kw_unicode_true = {'unicode': True} # make 2to3 work properly
kw_unicode_false = {'unicode': False}
-class TestBasic(object):
+class TestBasic:
def test_from_object_array(self):
A = np.array([['abc', 2],
['long ', '0123456789']], dtype='O')
@@ -83,7 +80,7 @@ class TestBasic(object):
assert_equal(A.itemsize, 4)
assert_(issubclass(A.dtype.type, np.unicode_))
-class TestVecString(object):
+class TestVecString:
def test_non_existent_method(self):
def fail():
@@ -122,19 +119,19 @@ class TestVecString(object):
def test_invalid_result_type(self):
def fail():
- _vec_string(['a'], np.integer, 'strip')
+ _vec_string(['a'], np.int_, 'strip')
assert_raises(TypeError, fail)
def test_broadcast_error(self):
def fail():
- _vec_string([['abc', 'def']], np.integer, 'find', (['a', 'd', 'j'],))
+ _vec_string([['abc', 'def']], np.int_, 'find', (['a', 'd', 'j'],))
assert_raises(ValueError, fail)
-class TestWhitespace(object):
+class TestWhitespace:
def setup(self):
self.A = np.array([['abc ', '123 '],
['789 ', 'xyz ']]).view(np.chararray)
@@ -149,7 +146,7 @@ class TestWhitespace(object):
assert_(not np.any(self.A < self.B))
assert_(not np.any(self.A != self.B))
-class TestChar(object):
+class TestChar:
def setup(self):
self.A = np.array('abc1', dtype='c').view(np.chararray)
@@ -157,7 +154,7 @@ class TestChar(object):
assert_equal(self.A.shape, (4,))
assert_equal(self.A.upper()[:2].tobytes(), b'AB')
-class TestComparisons(object):
+class TestComparisons:
def setup(self):
self.A = np.array([['abc', '123'],
['789', 'xyz']]).view(np.chararray)
@@ -198,7 +195,7 @@ class TestComparisonsMixed2(TestComparisons):
self.A = np.array([['abc', '123'],
['789', 'xyz']], np.unicode_).view(np.chararray)
-class TestInformation(object):
+class TestInformation:
def setup(self):
self.A = np.array([[' abc ', ''],
['12345', 'MixedCase'],
@@ -304,7 +301,7 @@ class TestInformation(object):
assert_raises(TypeError, fail)
-class TestMethods(object):
+class TestMethods:
def setup(self):
self.A = np.array([[' abc ', ''],
['12345', 'MixedCase'],
@@ -343,15 +340,8 @@ class TestMethods(object):
assert_array_equal(C, tgt)
def test_decode(self):
- if sys.version_info[0] >= 3:
- A = np.char.array([b'\\u03a3'])
- assert_(A.decode('unicode-escape')[0] == '\u03a3')
- else:
- with suppress_warnings() as sup:
- if sys.py3kwarning:
- sup.filter(DeprecationWarning, "'hex_codec'")
- A = np.char.array(['736563726574206d657373616765'])
- assert_(A.decode('hex_codec')[0] == 'secret message')
+ A = np.char.array([b'\\u03a3'])
+ assert_(A.decode('unicode-escape')[0] == '\u03a3')
def test_encode(self):
B = self.B.encode('unicode_escape')
@@ -362,18 +352,12 @@ class TestMethods(object):
assert_(T[2, 0] == b'123 345 \0')
def test_join(self):
- if sys.version_info[0] >= 3:
- # NOTE: list(b'123') == [49, 50, 51]
- # so that b','.join(b'123') results to an error on Py3
- A0 = self.A.decode('ascii')
- else:
- A0 = self.A
+ # NOTE: list(b'123') == [49, 50, 51]
+        # so that b','.join(b'123') results in an error on Py3
+ A0 = self.A.decode('ascii')
A = np.char.join([',', '#'], A0)
- if sys.version_info[0] >= 3:
- assert_(issubclass(A.dtype.type, np.unicode_))
- else:
- assert_(issubclass(A.dtype.type, np.string_))
+ assert_(issubclass(A.dtype.type, np.unicode_))
tgt = np.array([[' ,a,b,c, ', ''],
['1,2,3,4,5', 'M#i#x#e#d#C#a#s#e'],
['1,2,3, ,\t, ,3,4,5, ,\x00, ', 'U#P#P#E#R']])
@@ -444,15 +428,6 @@ class TestMethods(object):
assert_(issubclass(R.dtype.type, np.string_))
assert_array_equal(R, tgt)
- if sys.version_info[0] < 3:
- # NOTE: b'abc'.replace(b'a', 'b') is not allowed on Py3
- R = self.A.replace(b'a', u'\u03a3')
- tgt = [[u' \u03a3bc ', ''],
- ['12345', u'MixedC\u03a3se'],
- ['123 \t 345 \x00', 'UPPER']]
- assert_(issubclass(R.dtype.type, np.unicode_))
- assert_array_equal(R, tgt)
-
def test_rjust(self):
assert_(issubclass(self.A.rjust(10).dtype.type, np.string_))
@@ -599,7 +574,7 @@ class TestMethods(object):
[False, False], [True, False], [False, False]])
-class TestOperations(object):
+class TestOperations:
def setup(self):
self.A = np.array([['abc', '123'],
['789', 'xyz']]).view(np.chararray)
diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py
index 8bffaa9af..82d24e0f7 100644
--- a/numpy/core/tests/test_deprecations.py
+++ b/numpy/core/tests/test_deprecations.py
@@ -3,15 +3,12 @@ Tests related to deprecation warnings. Also a convenient place
to document how deprecations should eventually be turned into errors.
"""
-from __future__ import division, absolute_import, print_function
-
import datetime
-import sys
import operator
import warnings
import pytest
-import shutil
import tempfile
+import re
import numpy as np
from numpy.testing import (
@@ -27,7 +24,7 @@ except ImportError:
_has_pytz = False
-class _DeprecationTestCase(object):
+class _DeprecationTestCase:
# Just as warning: warnings uses re.match, so the start of this message
# must match.
message = ''
@@ -137,7 +134,7 @@ class _VisibleDeprecationTestCase(_DeprecationTestCase):
warning_cls = np.VisibleDeprecationWarning
-class TestNonTupleNDIndexDeprecation(object):
+class TestNonTupleNDIndexDeprecation:
def test_basic(self):
a = np.zeros((5, 5))
with warnings.catch_warnings():
@@ -172,7 +169,7 @@ class TestComparisonDeprecations(_DeprecationTestCase):
# (warning is issued a couple of times here)
self.assert_deprecated(op, args=(a, a[:-1]), num=None)
- # Element comparison error (numpy array can't be compared).
+        # Ragged array comparison returns True/False
a = np.array([1, np.array([1,2,3])], dtype=object)
b = np.array([1, np.array([1,2,3])], dtype=object)
self.assert_deprecated(op, args=(a, b), num=None)
@@ -189,7 +186,7 @@ class TestComparisonDeprecations(_DeprecationTestCase):
assert_warns(FutureWarning, lambda: a == [])
def test_void_dtype_equality_failures(self):
- class NotArray(object):
+ class NotArray:
def __array__(self):
raise TypeError
@@ -229,15 +226,10 @@ class TestComparisonDeprecations(_DeprecationTestCase):
struct = np.zeros(2, dtype="i4,i4")
for arg2 in [struct, "a"]:
for f in [operator.lt, operator.le, operator.gt, operator.ge]:
- if sys.version_info[0] >= 3:
- # py3
- with warnings.catch_warnings() as l:
- warnings.filterwarnings("always")
- assert_raises(TypeError, f, arg1, arg2)
- assert_(not l)
- else:
- # py2
- assert_warns(DeprecationWarning, f, arg1, arg2)
+ with warnings.catch_warnings() as l:
+ warnings.filterwarnings("always")
+ assert_raises(TypeError, f, arg1, arg2)
+ assert_(not l)
class TestDatetime64Timezone(_DeprecationTestCase):
@@ -334,15 +326,12 @@ class TestNumericStyleTypecodes(_DeprecationTestCase):
'Int8', 'Int16', 'Int32', 'Int64', 'Object0', 'Timedelta64',
'UInt8', 'UInt16', 'UInt32', 'UInt64', 'Void0'
]
- if sys.version_info[0] < 3:
- deprecated_types.extend(['Unicode0', 'String0'])
-
for dt in deprecated_types:
self.assert_deprecated(np.dtype, exceptions=(TypeError,),
args=(dt,))
-class TestTestDeprecated(object):
+class TestTestDeprecated:
def test_assert_deprecated(self):
test_case_instance = _DeprecationTestCase()
test_case_instance.setup()
@@ -357,28 +346,6 @@ class TestTestDeprecated(object):
test_case_instance.teardown()
-class TestClassicIntDivision(_DeprecationTestCase):
- """
- See #7949. Deprecate the numeric-style dtypes with -3 flag in python 2
- if used for division
- List of data types: https://docs.scipy.org/doc/numpy/user/basics.types.html
- """
- def test_int_dtypes(self):
- #scramble types and do some mix and match testing
- deprecated_types = [
- 'bool_', 'int_', 'intc', 'uint8', 'int8', 'uint64', 'int32', 'uint16',
- 'intp', 'int64', 'uint32', 'int16'
- ]
- if sys.version_info[0] < 3 and sys.py3kwarning:
- import operator as op
- dt2 = 'bool_'
- for dt1 in deprecated_types:
- a = np.array([1,2,3], dtype=dt1)
- b = np.array([1,2,3], dtype=dt2)
- self.assert_deprecated(op.div, args=(a,b))
- dt2 = dt1
-
-
class TestNonNumericConjugate(_DeprecationTestCase):
"""
Deprecate no-op behavior of ndarray.conjugate on non-numeric dtypes,
@@ -568,3 +535,86 @@ class TestNonZero(_DeprecationTestCase):
def test_zerod(self):
self.assert_deprecated(lambda: np.nonzero(np.array(0)))
self.assert_deprecated(lambda: np.nonzero(np.array(1)))
+
+
+def test_deprecate_ragged_arrays():
+ # 2019-11-29 1.19.0
+ #
+ # NEP 34 deprecated automatic object dtype when creating ragged
+ # arrays. Also see the "ragged" tests in `test_multiarray`
+ #
+ # emits a VisibleDeprecationWarning
+ arg = [1, [2, 3]]
+ with assert_warns(np.VisibleDeprecationWarning):
+ np.array(arg)
+
+
+class TestToString(_DeprecationTestCase):
+ # 2020-03-06 1.19.0
+ message = re.escape("tostring() is deprecated. Use tobytes() instead.")
+
+ def test_tostring(self):
+ arr = np.array(list(b"test\xFF"), dtype=np.uint8)
+ self.assert_deprecated(arr.tostring)
+
+ def test_tostring_matches_tobytes(self):
+ arr = np.array(list(b"test\xFF"), dtype=np.uint8)
+ b = arr.tobytes()
+ with assert_warns(DeprecationWarning):
+ s = arr.tostring()
+ assert s == b
+
+
+class TestDTypeCoercion(_DeprecationTestCase):
+ # 2020-02-06 1.19.0
+ message = "Converting .* to a dtype .*is deprecated"
+ deprecated_types = [
+ # The builtin scalar super types:
+ np.generic, np.flexible, np.number,
+ np.inexact, np.floating, np.complexfloating,
+ np.integer, np.unsignedinteger, np.signedinteger,
+ # character is a deprecated S1 special case:
+ np.character,
+ ]
+
+ def test_dtype_coercion(self):
+ for scalar_type in self.deprecated_types:
+ self.assert_deprecated(np.dtype, args=(scalar_type,))
+
+ def test_array_construction(self):
+ for scalar_type in self.deprecated_types:
+ self.assert_deprecated(np.array, args=([], scalar_type,))
+
+ def test_not_deprecated(self):
+ # All specific types are not deprecated:
+ for group in np.sctypes.values():
+ for scalar_type in group:
+ self.assert_not_deprecated(np.dtype, args=(scalar_type,))
+
+ for scalar_type in [type, dict, list, tuple]:
+ # Typical python types are coerced to object currently:
+ self.assert_not_deprecated(np.dtype, args=(scalar_type,))
+
+
+class BuiltInRoundComplexDType(_DeprecationTestCase):
+ # 2020-03-31 1.19.0
+ deprecated_types = [np.csingle, np.cdouble, np.clongdouble]
+ not_deprecated_types = [
+ np.int8, np.int16, np.int32, np.int64,
+ np.uint8, np.uint16, np.uint32, np.uint64,
+ np.float16, np.float32, np.float64,
+ ]
+
+ def test_deprecated(self):
+ for scalar_type in self.deprecated_types:
+ scalar = scalar_type(0)
+ self.assert_deprecated(round, args=(scalar,))
+ self.assert_deprecated(round, args=(scalar, 0))
+ self.assert_deprecated(round, args=(scalar,), kwargs={'ndigits': 0})
+
+ def test_not_deprecated(self):
+ for scalar_type in self.not_deprecated_types:
+ scalar = scalar_type(0)
+ self.assert_not_deprecated(round, args=(scalar,))
+ self.assert_not_deprecated(round, args=(scalar, 0))
+ self.assert_not_deprecated(round, args=(scalar,), kwargs={'ndigits': 0})
diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py
index d2fbbae5b..c9a65cd9c 100644
--- a/numpy/core/tests/test_dtype.py
+++ b/numpy/core/tests/test_dtype.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import sys
import operator
import pytest
@@ -23,9 +21,9 @@ def assert_dtype_not_equal(a, b):
assert_(hash(a) != hash(b),
"two different types hash to the same value !")
-class TestBuiltin(object):
+class TestBuiltin:
@pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object,
- np.unicode])
+ np.compat.unicode])
def test_run(self, t):
"""Only test hash runs at all."""
dt = np.dtype(t)
@@ -138,15 +136,15 @@ class TestBuiltin(object):
'offsets':[0, 2]}, align=True)
def test_field_order_equality(self):
- x = np.dtype({'names': ['A', 'B'],
- 'formats': ['i4', 'f4'],
+ x = np.dtype({'names': ['A', 'B'],
+ 'formats': ['i4', 'f4'],
'offsets': [0, 4]})
- y = np.dtype({'names': ['B', 'A'],
- 'formats': ['f4', 'i4'],
+ y = np.dtype({'names': ['B', 'A'],
+ 'formats': ['f4', 'i4'],
'offsets': [4, 0]})
assert_equal(x == y, False)
-class TestRecord(object):
+class TestRecord:
def test_equivalent_record(self):
"""Test whether equivalent record dtypes hash the same."""
a = np.dtype([('yo', int)])
@@ -420,7 +418,7 @@ class TestRecord(object):
{'formats': ['i4', 'i4'], 'f0': ('i4', 0), 'f1':('i4', 4)})
def test_fieldless_views(self):
- a = np.zeros(2, dtype={'names':[], 'formats':[], 'offsets':[],
+ a = np.zeros(2, dtype={'names':[], 'formats':[], 'offsets':[],
'itemsize':8})
assert_raises(ValueError, a.view, np.dtype([]))
@@ -445,7 +443,7 @@ class TestRecord(object):
np.ones((1, 2), dtype=bool))
-class TestSubarray(object):
+class TestSubarray:
def test_single_subarray(self):
a = np.dtype((int, (2)))
b = np.dtype((int, (2,)))
@@ -521,7 +519,7 @@ class TestSubarray(object):
assert_(isinstance(dt['a'].shape, tuple))
#
- class IntLike(object):
+ class IntLike:
def __index__(self):
return 3
@@ -711,7 +709,7 @@ class TestStructuredObjectRefcounting:
assert after_repeat - after == count * 2 * 10
-class TestStructuredDtypeSparseFields(object):
+class TestStructuredDtypeSparseFields:
"""Tests subarray fields which contain sparse dtypes so that
not all memory is used by the dtype work. Such dtype's should
leave the underlying memory unchanged.
@@ -732,7 +730,7 @@ class TestStructuredDtypeSparseFields(object):
assert_array_equal(arr["a"]["aa"], np.zeros((3, 2, 3)))
def test_sparse_field_assignment_fancy(self):
- # Fancy assignment goes to the copyswap function for comlex types:
+ # Fancy assignment goes to the copyswap function for complex types:
arr = np.zeros(3, self.dtype)
sparse_arr = arr.view(self.sparse_dtype)
@@ -741,7 +739,7 @@ class TestStructuredDtypeSparseFields(object):
assert_array_equal(arr["a"]["aa"], np.zeros((3, 2, 3)))
-class TestMonsterType(object):
+class TestMonsterType:
"""Test deeply nested subtypes."""
def test1(self):
@@ -759,7 +757,7 @@ class TestMonsterType(object):
('yi', np.dtype((a, (3, 2))))])
assert_dtype_equal(c, d)
-class TestMetadata(object):
+class TestMetadata:
def test_no_metadata(self):
d = np.dtype(int)
assert_(d.metadata is None)
@@ -781,7 +779,7 @@ class TestMetadata(object):
d = np.dtype((np.void, np.dtype('i4,i4', metadata={'datum': 1})))
assert_(d.metadata == {'datum': 1})
-class TestString(object):
+class TestString:
def test_complex_dtype_str(self):
dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
('rtile', '>f4', (64, 36))], (3,)),
@@ -902,11 +900,6 @@ class TestString(object):
assert_equal(repr(dt), "dtype(('<i2', (1,)))")
assert_equal(str(dt), "('<i2', (1,))")
- @pytest.mark.skipif(sys.version_info[0] >= 3, reason="Python 2 only")
- def test_dtype_str_with_long_in_shape(self):
- # Pull request #376, should not error
- np.dtype('(1L,)i4')
-
def test_base_dtype_with_object_type(self):
# Issue gh-2798, should not error.
np.array(['a'], dtype="O").astype(("O", [("name", "O")]))
@@ -934,7 +927,7 @@ class TestString(object):
assert_equal(dt.name, 'record16')
-class TestDtypeAttributeDeletion(object):
+class TestDtypeAttributeDeletion:
def test_dtype_non_writable_attributes_deletion(self):
dt = np.dtype(np.double)
@@ -952,7 +945,7 @@ class TestDtypeAttributeDeletion(object):
assert_raises(AttributeError, delattr, dt, s)
-class TestDtypeAttributes(object):
+class TestDtypeAttributes:
def test_descr_has_trailing_void(self):
# see gh-6359
dtype = np.dtype({
@@ -970,7 +963,7 @@ class TestDtypeAttributes(object):
assert_equal(np.dtype(user_def_subcls).name, 'user_def_subcls')
-class TestPickling(object):
+class TestPickling:
def check_pickling(self, dtype):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
@@ -986,7 +979,7 @@ class TestPickling(object):
assert_equal(x[0], y[0])
@pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object,
- np.unicode, bool])
+ np.compat.unicode, bool])
def test_builtin(self, t):
self.check_pickling(np.dtype(t))
@@ -1054,7 +1047,7 @@ def test_invalid_dtype_string():
assert_raises(TypeError, np.dtype, u'Fl\xfcgel')
-class TestFromDTypeAttribute(object):
+class TestFromDTypeAttribute:
def test_simple(self):
class dt:
dtype = "f8"
@@ -1098,7 +1091,7 @@ class TestFromDTypeAttribute(object):
with pytest.raises(RecursionError):
np.dtype(dt(1))
-class TestFromCTypes(object):
+class TestFromCTypes:
@staticmethod
def check(ctype, dtype):
diff --git a/numpy/core/tests/test_einsum.py b/numpy/core/tests/test_einsum.py
index cfeeb8a90..fd0de8732 100644
--- a/numpy/core/tests/test_einsum.py
+++ b/numpy/core/tests/test_einsum.py
@@ -1,11 +1,9 @@
-from __future__ import division, absolute_import, print_function
-
import itertools
import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_almost_equal,
- assert_raises, suppress_warnings, assert_raises_regex
+ assert_raises, suppress_warnings, assert_raises_regex, assert_allclose
)
# Setup for optimize einsum
@@ -14,7 +12,7 @@ sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3])
global_size_dict = dict(zip(chars, sizes))
-class TestEinsum(object):
+class TestEinsum:
def test_einsum_errors(self):
for do_opt in [True, False]:
# Need enough arguments
@@ -607,6 +605,10 @@ class TestEinsum(object):
[[[1, 3], [3, 9], [5, 15], [7, 21]],
[[8, 16], [16, 32], [24, 48], [32, 64]]])
+ # Ensure explicitly setting out=None does not cause an error
+ # see issue gh-15776 and issue gh-15256
+ assert_equal(np.einsum('i,j', [1], [2], out=None), [[2]])
+
def test_subscript_range(self):
# Issue #7741, make sure that all letters of Latin alphabet (both uppercase & lowercase) can be used
# when creating a subscript from arrays
@@ -700,6 +702,14 @@ class TestEinsum(object):
y2 = x[idx[:, None], idx[:, None], idx, idx]
assert_equal(y1, y2)
+ def test_einsum_failed_on_p9_and_s390x(self):
+ # Issues gh-14692 and gh-12689
+ # Bug with signed vs unsigned char errored on power9 and s390x Linux
+ tensor = np.random.random_sample((10, 10, 10, 10))
+ x = np.einsum('ijij->', tensor)
+ y = tensor.trace(axis1=0, axis2=2).trace()
+ assert_allclose(x, y)
+
def test_einsum_all_contig_non_contig_output(self):
# Issue gh-5907, tests that the all contiguous special case
# actually checks the contiguity of the output
@@ -860,7 +870,7 @@ class TestEinsum(object):
self.optimize_compare('obk,ijk->ioj', operands=[g, g])
-class TestEinsumPath(object):
+class TestEinsumPath:
def build_operands(self, string, size_dict=global_size_dict):
# Builds views based off initial operands
diff --git a/numpy/core/tests/test_errstate.py b/numpy/core/tests/test_errstate.py
index 0008c4cc8..184a37300 100644
--- a/numpy/core/tests/test_errstate.py
+++ b/numpy/core/tests/test_errstate.py
@@ -1,14 +1,19 @@
-from __future__ import division, absolute_import, print_function
-
-import platform
import pytest
+import sysconfig
import numpy as np
from numpy.testing import assert_, assert_raises
+# The floating point emulation on ARM EABI systems lacking a hardware FPU is
+# known to be buggy. This is an attempt to identify these hosts. It may not
+# catch all possible cases, but it catches the known cases of gh-413 and
+# gh-15562.
+hosttype = sysconfig.get_config_var('HOST_GNU_TYPE')
+arm_softfloat = False if hosttype is None else hosttype.endswith('gnueabi')
-class TestErrstate(object):
- @pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.")
+class TestErrstate:
+ @pytest.mark.skipif(arm_softfloat,
+ reason='platform/cpu issue with FPU (gh-413,-15562)')
def test_invalid(self):
with np.errstate(all='raise', under='ignore'):
a = -np.arange(3)
@@ -19,6 +24,8 @@ class TestErrstate(object):
with assert_raises(FloatingPointError):
np.sqrt(a)
+ @pytest.mark.skipif(arm_softfloat,
+ reason='platform/cpu issue with FPU (gh-15562)')
def test_divide(self):
with np.errstate(all='raise', under='ignore'):
a = -np.arange(3)
@@ -28,6 +35,9 @@ class TestErrstate(object):
# While this should fail!
with assert_raises(FloatingPointError):
a // 0
+ # As should this, see gh-15562
+ with assert_raises(FloatingPointError):
+ a // a
def test_errcall(self):
def foo(*args):
diff --git a/numpy/core/tests/test_extint128.py b/numpy/core/tests/test_extint128.py
index 7c454a603..3b64915f3 100644
--- a/numpy/core/tests/test_extint128.py
+++ b/numpy/core/tests/test_extint128.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import itertools
import contextlib
import operator
diff --git a/numpy/core/tests/test_function_base.py b/numpy/core/tests/test_function_base.py
index 84b60b19c..2197ef0cd 100644
--- a/numpy/core/tests/test_function_base.py
+++ b/numpy/core/tests/test_function_base.py
@@ -1,12 +1,9 @@
-from __future__ import division, absolute_import, print_function
-
from numpy import (
logspace, linspace, geomspace, dtype, array, sctypes, arange, isnan,
ndarray, sqrt, nextafter, stack
)
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_array_equal, assert_allclose,
- suppress_warnings
)
@@ -42,7 +39,7 @@ class PhysicalQuantity2(ndarray):
__array_priority__ = 10
-class TestLogspace(object):
+class TestLogspace:
def test_basic(self):
y = logspace(0, 6)
@@ -92,7 +89,7 @@ class TestLogspace(object):
assert_equal(ls, logspace(1.0, 7.0, 1))
-class TestGeomspace(object):
+class TestGeomspace:
def test_basic(self):
y = geomspace(1, 1e6)
@@ -222,7 +219,7 @@ class TestGeomspace(object):
assert_raises(ValueError, geomspace, 0, 0)
-class TestLinspace(object):
+class TestLinspace:
def test_basic(self):
y = linspace(0, 10)
@@ -309,7 +306,7 @@ class TestLinspace(object):
# Ensure that start/stop can be objects that implement
# __array_interface__ and are convertible to numeric scalars
- class Arrayish(object):
+ class Arrayish:
"""
A generic object that supports the __array_interface__ and hence
can in principle be converted to a numeric scalar, but is not
@@ -351,14 +348,20 @@ class TestLinspace(object):
arange(j+1, dtype=int))
def test_retstep(self):
- y = linspace(0, 1, 2, retstep=True)
- assert_(isinstance(y, tuple) and len(y) == 2)
- for num in (0, 1):
- for ept in (False, True):
+ for num in [0, 1, 2]:
+ for ept in [False, True]:
y = linspace(0, 1, num, endpoint=ept, retstep=True)
- assert_(isinstance(y, tuple) and len(y) == 2 and
- len(y[0]) == num and isnan(y[1]),
- 'num={0}, endpoint={1}'.format(num, ept))
+ assert isinstance(y, tuple) and len(y) == 2
+ if num == 2:
+ y0_expect = [0.0, 1.0] if ept else [0.0, 0.5]
+ assert_array_equal(y[0], y0_expect)
+ assert_equal(y[1], y0_expect[1])
+ elif num == 1 and not ept:
+ assert_array_equal(y[0], [0.0])
+ assert_equal(y[1], 1.0)
+ else:
+ assert_array_equal(y[0], [0.0][:num])
+ assert isnan(y[1])
def test_object(self):
start = array(1, dtype='O')
diff --git a/numpy/core/tests/test_getlimits.py b/numpy/core/tests/test_getlimits.py
index 2f6648183..bcf8cf659 100644
--- a/numpy/core/tests/test_getlimits.py
+++ b/numpy/core/tests/test_getlimits.py
@@ -1,8 +1,6 @@
""" Test functions for limits module.
"""
-from __future__ import division, absolute_import, print_function
-
import numpy as np
from numpy.core import finfo, iinfo
from numpy import half, single, double, longdouble
@@ -11,37 +9,37 @@ from numpy.core.getlimits import _discovered_machar, _float_ma
##################################################
-class TestPythonFloat(object):
+class TestPythonFloat:
def test_singleton(self):
ftype = finfo(float)
ftype2 = finfo(float)
assert_equal(id(ftype), id(ftype2))
-class TestHalf(object):
+class TestHalf:
def test_singleton(self):
ftype = finfo(half)
ftype2 = finfo(half)
assert_equal(id(ftype), id(ftype2))
-class TestSingle(object):
+class TestSingle:
def test_singleton(self):
ftype = finfo(single)
ftype2 = finfo(single)
assert_equal(id(ftype), id(ftype2))
-class TestDouble(object):
+class TestDouble:
def test_singleton(self):
ftype = finfo(double)
ftype2 = finfo(double)
assert_equal(id(ftype), id(ftype2))
-class TestLongdouble(object):
+class TestLongdouble:
def test_singleton(self):
ftype = finfo(longdouble)
ftype2 = finfo(longdouble)
assert_equal(id(ftype), id(ftype2))
-class TestFinfo(object):
+class TestFinfo:
def test_basic(self):
dts = list(zip(['f2', 'f4', 'f8', 'c8', 'c16'],
[np.float16, np.float32, np.float64, np.complex64,
@@ -54,7 +52,7 @@ class TestFinfo(object):
getattr(finfo(dt2), attr), attr)
assert_raises(ValueError, finfo, 'i4')
-class TestIinfo(object):
+class TestIinfo:
def test_basic(self):
dts = list(zip(['i1', 'i2', 'i4', 'i8',
'u1', 'u2', 'u4', 'u8'],
@@ -71,7 +69,7 @@ class TestIinfo(object):
for T in types:
assert_equal(iinfo(T).max, T(-1))
-class TestRepr(object):
+class TestRepr:
def test_iinfo_repr(self):
expected = "iinfo(min=-32768, max=32767, dtype=int16)"
assert_equal(repr(np.iinfo(np.int16)), expected)
diff --git a/numpy/core/tests/test_half.py b/numpy/core/tests/test_half.py
index 1e1e6d7d9..c6d046be1 100644
--- a/numpy/core/tests/test_half.py
+++ b/numpy/core/tests/test_half.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import platform
import pytest
@@ -18,7 +16,7 @@ def assert_raises_fpe(strmatch, callable, *args, **kwargs):
assert_(False,
"Did not raise floating point %s error" % strmatch)
-class TestHalf(object):
+class TestHalf:
def setup(self):
# An array of all possible float16 values
self.all_f16 = np.arange(0x10000, dtype=uint16)
diff --git a/numpy/core/tests/test_indexerrors.py b/numpy/core/tests/test_indexerrors.py
index 63b43c473..a0e9a8c55 100644
--- a/numpy/core/tests/test_indexerrors.py
+++ b/numpy/core/tests/test_indexerrors.py
@@ -1,9 +1,10 @@
-from __future__ import division, absolute_import, print_function
-
import numpy as np
-from numpy.testing import assert_raises
+from numpy.testing import (
+ assert_raises, assert_raises_regex,
+ )
+
-class TestIndexErrors(object):
+class TestIndexErrors:
'''Tests to exercise indexerrors not covered by other tests.'''
def test_arraytypes_fasttake(self):
@@ -112,6 +113,15 @@ class TestIndexErrors(object):
assert_raises(IndexError, lambda: a[(1, [0, 1])])
assert_raises(IndexError, lambda: assign(a, (1, [0, 1]), 1))
+ def test_mapping_error_message(self):
+ a = np.zeros((3, 5))
+ index = (1, 2, 3, 4, 5)
+ assert_raises_regex(
+ IndexError,
+ "too many indices for array: "
+ "array is 2-dimensional, but 5 were indexed",
+ lambda: a[index])
+
def test_methods(self):
"cases from methods.c"
diff --git a/numpy/core/tests/test_indexing.py b/numpy/core/tests/test_indexing.py
index 70a5a246f..4bb5cb11a 100644
--- a/numpy/core/tests/test_indexing.py
+++ b/numpy/core/tests/test_indexing.py
@@ -1,21 +1,18 @@
-from __future__ import division, absolute_import, print_function
-
import sys
import warnings
import functools
import operator
-import pytest
import numpy as np
from numpy.core._multiarray_tests import array_indexing
from itertools import product
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_array_equal, assert_warns,
- HAS_REFCOUNT, suppress_warnings,
+ HAS_REFCOUNT,
)
-class TestIndexing(object):
+class TestIndexing:
def test_index_no_floats(self):
a = np.array([[[5]]])
@@ -397,14 +394,14 @@ class TestIndexing(object):
def test_scalar_return_type(self):
# Full scalar indices should return scalars and object
# arrays should not call PyArray_Return on their items
- class Zero(object):
+ class Zero:
# The most basic valid indexing
def __index__(self):
return 0
z = Zero()
- class ArrayLike(object):
+ class ArrayLike:
# Simple array, should behave like the array
def __array__(self):
return np.array(0)
@@ -484,7 +481,7 @@ class TestIndexing(object):
# on item getting, this should not be converted to an nd-index (tuple)
# If this object happens to be a valid index otherwise, it should work
# This object here is very dubious and probably bad though:
- class SequenceLike(object):
+ class SequenceLike:
def __index__(self):
return 0
@@ -527,7 +524,7 @@ class TestIndexing(object):
arr[slices] = 10
assert_array_equal(arr, 10.)
-class TestFieldIndexing(object):
+class TestFieldIndexing:
def test_scalar_return_type(self):
# Field access on an array should return an array, even if it
# is 0-d.
@@ -536,7 +533,7 @@ class TestFieldIndexing(object):
assert_(isinstance(a[['a']], np.ndarray))
-class TestBroadcastedAssignments(object):
+class TestBroadcastedAssignments:
def assign(self, a, ind, val):
a[ind] = val
return a
@@ -587,7 +584,7 @@ class TestBroadcastedAssignments(object):
assert_((a[::-1] == v).all())
-class TestSubclasses(object):
+class TestSubclasses:
def test_basic(self):
# Test that indexing in various ways produces SubClass instances,
# and that the base is set up correctly: the original subclass
@@ -650,56 +647,8 @@ class TestSubclasses(object):
assert_array_equal(new_s.finalize_status, new_s)
assert_array_equal(new_s.old, s)
- @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
- def test_slice_decref_getsetslice(self):
- # See gh-10066, a temporary slice object should be discarted.
- # This test is only really interesting on Python 2 since
- # it goes through `__set/getslice__` here and can probably be
- # removed. Use 0:7 to make sure it is never None:7.
- class KeepIndexObject(np.ndarray):
- def __getitem__(self, indx):
- self.indx = indx
- if indx == slice(0, 7):
- raise ValueError
-
- def __setitem__(self, indx, val):
- self.indx = indx
- if indx == slice(0, 4):
- raise ValueError
- k = np.array([1]).view(KeepIndexObject)
- k[0:5]
- assert_equal(k.indx, slice(0, 5))
- assert_equal(sys.getrefcount(k.indx), 2)
- try:
- k[0:7]
- raise AssertionError
- except ValueError:
- # The exception holds a reference to the slice so clear on Py2
- if hasattr(sys, 'exc_clear'):
- with suppress_warnings() as sup:
- sup.filter(DeprecationWarning)
- sys.exc_clear()
- assert_equal(k.indx, slice(0, 7))
- assert_equal(sys.getrefcount(k.indx), 2)
-
- k[0:3] = 6
- assert_equal(k.indx, slice(0, 3))
- assert_equal(sys.getrefcount(k.indx), 2)
- try:
- k[0:4] = 2
- raise AssertionError
- except ValueError:
- # The exception holds a reference to the slice so clear on Py2
- if hasattr(sys, 'exc_clear'):
- with suppress_warnings() as sup:
- sup.filter(DeprecationWarning)
- sys.exc_clear()
- assert_equal(k.indx, slice(0, 4))
- assert_equal(sys.getrefcount(k.indx), 2)
-
-
-class TestFancyIndexingCast(object):
+class TestFancyIndexingCast:
def test_boolean_index_cast_assign(self):
# Setup the boolean index and float arrays.
shape = (8, 63)
@@ -721,7 +670,7 @@ class TestFancyIndexingCast(object):
zero_array.__setitem__, bool_index, np.array([1j]))
assert_equal(zero_array[0, 1], 0)
-class TestFancyIndexingEquivalence(object):
+class TestFancyIndexingEquivalence:
def test_object_assign(self):
# Check that the field and object special case using copyto is active.
# The right hand side cannot be converted to an array here.
@@ -769,7 +718,7 @@ class TestFancyIndexingEquivalence(object):
assert_array_equal(a, b[0])
-class TestMultiIndexingAutomated(object):
+class TestMultiIndexingAutomated:
"""
These tests use code to mimic the C-Code indexing for selection.
@@ -1191,7 +1140,7 @@ class TestMultiIndexingAutomated(object):
for index in self.complex_indices:
self._check_single_index(a, index)
-class TestFloatNonIntegerArgument(object):
+class TestFloatNonIntegerArgument:
"""
These test that ``TypeError`` is raised when you try to use
non-integers as arguments to for indexing and slicing e.g. ``a[0.0:5]``
@@ -1246,7 +1195,7 @@ class TestFloatNonIntegerArgument(object):
assert_raises(TypeError, np.min, d, (.2, 1.2))
-class TestBooleanIndexing(object):
+class TestBooleanIndexing:
# Using a boolean as integer argument/indexing is an error.
def test_bool_as_int_argument_errors(self):
a = np.array([[[1]]])
@@ -1267,7 +1216,7 @@ class TestBooleanIndexing(object):
assert_raises(IndexError, lambda: a[False, [0, 1], ...])
-class TestArrayToIndexDeprecation(object):
+class TestArrayToIndexDeprecation:
"""Creating an an index from array not 0-D is an error.
"""
@@ -1280,7 +1229,7 @@ class TestArrayToIndexDeprecation(object):
assert_raises(TypeError, np.take, a, [0], a)
-class TestNonIntegerArrayLike(object):
+class TestNonIntegerArrayLike:
"""Tests that array_likes only valid if can safely cast to integer.
For instance, lists give IndexError when they cannot be safely cast to
@@ -1297,7 +1246,7 @@ class TestNonIntegerArrayLike(object):
a.__getitem__([])
-class TestMultipleEllipsisError(object):
+class TestMultipleEllipsisError:
"""An index can only have a single ellipsis.
"""
@@ -1308,7 +1257,7 @@ class TestMultipleEllipsisError(object):
assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 3,))
-class TestCApiAccess(object):
+class TestCApiAccess:
def test_getitem(self):
subscript = functools.partial(array_indexing, 0)
diff --git a/numpy/core/tests/test_item_selection.py b/numpy/core/tests/test_item_selection.py
index 9bd246866..3c35245a3 100644
--- a/numpy/core/tests/test_item_selection.py
+++ b/numpy/core/tests/test_item_selection.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import sys
import numpy as np
@@ -8,7 +6,7 @@ from numpy.testing import (
)
-class TestTake(object):
+class TestTake:
def test_simple(self):
a = [[1, 2], [3, 4]]
a_str = [[b'1', b'2'], [b'3', b'4']]
@@ -22,8 +20,9 @@ class TestTake(object):
'clip': {-1: 0, 4: 1}}
# Currently all types but object, use the same function generation.
# So it should not be necessary to test all. However test also a non
- # refcounted struct on top of object.
- types = int, object, np.dtype([('', 'i', 2)])
+ # refcounted struct on top of object, which has a size that hits the
+ # default (non-specialized) path.
+ types = int, object, np.dtype([('', 'i2', 3)])
for t in types:
# ta works, even if the array may be odd if buffer interface is used
ta = np.array(a if np.issubdtype(t, np.number) else a_str, dtype=t)
diff --git a/numpy/core/tests/test_longdouble.py b/numpy/core/tests/test_longdouble.py
index 59ac5923c..acef995f3 100644
--- a/numpy/core/tests/test_longdouble.py
+++ b/numpy/core/tests/test_longdouble.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import warnings
import pytest
@@ -39,22 +37,36 @@ def test_repr_roundtrip():
assert_equal(np.longdouble(repr(o)), o, "repr was %s" % repr(o))
-def test_unicode():
- np.longdouble(u"1.2")
+@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l")
+def test_repr_roundtrip_bytes():
+ o = 1 + LD_INFO.eps
+ assert_equal(np.longdouble(repr(o).encode("ascii")), o)
-def test_string():
- np.longdouble("1.2")
+@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l")
+@pytest.mark.parametrize("strtype", (np.str_, np.bytes_, str, bytes))
+def test_array_and_stringlike_roundtrip(strtype):
+ """
+ Test that string representations of long-double roundtrip both
+ for array casting and scalar coercion, see also gh-15608.
+ """
+ o = 1 + LD_INFO.eps
+ if strtype in (np.bytes_, bytes):
+ o_str = strtype(repr(o).encode("ascii"))
+ else:
+ o_str = strtype(repr(o))
-def test_bytes():
- np.longdouble(b"1.2")
+ # Test that `o` is correctly coerced from the string-like
+ assert o == np.longdouble(o_str)
+ # Test that arrays also roundtrip correctly:
+ o_strarr = np.asarray([o] * 3, dtype=strtype)
+ assert (o == o_strarr.astype(np.longdouble)).all()
-@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l")
-def test_repr_roundtrip_bytes():
- o = 1 + LD_INFO.eps
- assert_equal(np.longdouble(repr(o).encode("ascii")), o)
+ # And array coercion and casting to string give the same as scalar repr:
+ assert (o_strarr == o_str).all()
+ assert (np.asarray([o] * 3).astype(strtype) == o_str).all()
def test_bogus_string():
@@ -71,6 +83,38 @@ def test_fromstring():
err_msg="reading '%s'" % s)
+def test_fromstring_complex():
+ for ctype in ["complex", "cdouble", "cfloat"]:
+ # Check spacing between separator
+ assert_equal(np.fromstring("1, 2 , 3 ,4", sep=",", dtype=ctype),
+ np.array([1., 2., 3., 4.]))
+ # Real component not specified
+ assert_equal(np.fromstring("1j, -2j, 3j, 4e1j", sep=",", dtype=ctype),
+ np.array([1.j, -2.j, 3.j, 40.j]))
+ # Both components specified
+ assert_equal(np.fromstring("1+1j,2-2j, -3+3j, -4e1+4j", sep=",", dtype=ctype),
+ np.array([1. + 1.j, 2. - 2.j, - 3. + 3.j, - 40. + 4j]))
+ # Spaces at wrong places
+ with assert_warns(DeprecationWarning):
+ assert_equal(np.fromstring("1+2 j,3", dtype=ctype, sep=","),
+ np.array([1.]))
+ with assert_warns(DeprecationWarning):
+ assert_equal(np.fromstring("1+ 2j,3", dtype=ctype, sep=","),
+ np.array([1.]))
+ with assert_warns(DeprecationWarning):
+ assert_equal(np.fromstring("1 +2j,3", dtype=ctype, sep=","),
+ np.array([1.]))
+ with assert_warns(DeprecationWarning):
+ assert_equal(np.fromstring("1+j", dtype=ctype, sep=","),
+ np.array([1.]))
+ with assert_warns(DeprecationWarning):
+ assert_equal(np.fromstring("1+", dtype=ctype, sep=","),
+ np.array([1.]))
+ with assert_warns(DeprecationWarning):
+ assert_equal(np.fromstring("1j+1", dtype=ctype, sep=","),
+ np.array([1j]))
+
+
def test_fromstring_bogus():
with assert_warns(DeprecationWarning):
assert_equal(np.fromstring("1. 2. 3. flop 4.", dtype=float, sep=" "),
@@ -89,7 +133,7 @@ def test_fromstring_missing():
np.array([1]))
-class TestFileBased(object):
+class TestFileBased:
ldbl = 1 + LD_INFO.eps
tgt = np.array([ldbl]*5)
@@ -104,6 +148,88 @@ class TestFileBased(object):
res = np.fromfile(path, dtype=float, sep=" ")
assert_equal(res, np.array([1., 2., 3.]))
+ def test_fromfile_complex(self):
+ for ctype in ["complex", "cdouble", "cfloat"]:
+ # Check spacing between separator and only real component specified
+ with temppath() as path:
+ with open(path, 'wt') as f:
+ f.write("1, 2 , 3 ,4\n")
+
+ res = np.fromfile(path, dtype=ctype, sep=",")
+ assert_equal(res, np.array([1., 2., 3., 4.]))
+
+ # Real component not specified
+ with temppath() as path:
+ with open(path, 'wt') as f:
+ f.write("1j, -2j, 3j, 4e1j\n")
+
+ res = np.fromfile(path, dtype=ctype, sep=",")
+ assert_equal(res, np.array([1.j, -2.j, 3.j, 40.j]))
+
+ # Both components specified
+ with temppath() as path:
+ with open(path, 'wt') as f:
+ f.write("1+1j,2-2j, -3+3j, -4e1+4j\n")
+
+ res = np.fromfile(path, dtype=ctype, sep=",")
+ assert_equal(res, np.array([1. + 1.j, 2. - 2.j, - 3. + 3.j, - 40. + 4j]))
+
+ # Spaces at wrong places
+ with temppath() as path:
+ with open(path, 'wt') as f:
+ f.write("1+2 j,3\n")
+
+ with assert_warns(DeprecationWarning):
+ res = np.fromfile(path, dtype=ctype, sep=",")
+ assert_equal(res, np.array([1.]))
+
+ # Spaces at wrong places
+ with temppath() as path:
+ with open(path, 'wt') as f:
+ f.write("1+ 2j,3\n")
+
+ with assert_warns(DeprecationWarning):
+ res = np.fromfile(path, dtype=ctype, sep=",")
+ assert_equal(res, np.array([1.]))
+
+ # Spaces at wrong places
+ with temppath() as path:
+ with open(path, 'wt') as f:
+ f.write("1 +2j,3\n")
+
+ with assert_warns(DeprecationWarning):
+ res = np.fromfile(path, dtype=ctype, sep=",")
+ assert_equal(res, np.array([1.]))
+
+ # Spaces at wrong places
+ with temppath() as path:
+ with open(path, 'wt') as f:
+ f.write("1+j\n")
+
+ with assert_warns(DeprecationWarning):
+ res = np.fromfile(path, dtype=ctype, sep=",")
+ assert_equal(res, np.array([1.]))
+
+ # Spaces at wrong places
+ with temppath() as path:
+ with open(path, 'wt') as f:
+ f.write("1+\n")
+
+ with assert_warns(DeprecationWarning):
+ res = np.fromfile(path, dtype=ctype, sep=",")
+ assert_equal(res, np.array([1.]))
+
+ # Spaces at wrong places
+ with temppath() as path:
+ with open(path, 'wt') as f:
+ f.write("1j+1\n")
+
+ with assert_warns(DeprecationWarning):
+ res = np.fromfile(path, dtype=ctype, sep=",")
+ assert_equal(res, np.array([1.j]))
+
+
+
@pytest.mark.skipif(string_to_longdouble_inaccurate,
reason="Need strtold_l")
def test_fromfile(self):
diff --git a/numpy/core/tests/test_machar.py b/numpy/core/tests/test_machar.py
index ab8800c09..673f309f1 100644
--- a/numpy/core/tests/test_machar.py
+++ b/numpy/core/tests/test_machar.py
@@ -3,20 +3,18 @@ Test machar. Given recent changes to hardcode type data, we might want to get
rid of both MachAr and this test at some point.
"""
-from __future__ import division, absolute_import, print_function
-
from numpy.core.machar import MachAr
import numpy.core.numerictypes as ntypes
from numpy import errstate, array
-class TestMachAr(object):
+class TestMachAr:
def _run_machar_highprec(self):
# Instantiate MachAr instance with high enough precision to cause
# underflow
try:
hiprec = ntypes.float96
- MachAr(lambda v:array([v], hiprec))
+ MachAr(lambda v: array(v, hiprec))
except AttributeError:
# Fixme, this needs to raise a 'skip' exception.
"Skipping test: no ntypes.float96 available on this platform."
diff --git a/numpy/core/tests/test_mem_overlap.py b/numpy/core/tests/test_mem_overlap.py
index 3c8e0e722..675613de4 100644
--- a/numpy/core/tests/test_mem_overlap.py
+++ b/numpy/core/tests/test_mem_overlap.py
@@ -1,6 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
-import sys
import itertools
import pytest
@@ -8,14 +5,10 @@ import numpy as np
from numpy.core._multiarray_tests import solve_diophantine, internal_overlap
from numpy.core import _umath_tests
from numpy.lib.stride_tricks import as_strided
-from numpy.compat import long
from numpy.testing import (
assert_, assert_raises, assert_equal, assert_array_equal
)
-if sys.version_info[0] >= 3:
- xrange = range
-
ndims = 2
size = 10
@@ -47,9 +40,7 @@ def _indices_for_axis():
res = []
for nelems in (0, 2, 3):
ind = _indices_for_nelems(nelems)
-
- # no itertools.product available in Py2.4
- res.extend([(a, b) for a in ind for b in ind]) # all assignments of size "nelems"
+ res.extend(itertools.product(ind, ind)) # all assignments of size "nelems"
return res
@@ -58,18 +49,7 @@ def _indices(ndims):
"""Returns ((axis0_src, axis0_dst), (axis1_src, axis1_dst), ... ) index pairs."""
ind = _indices_for_axis()
-
- # no itertools.product available in Py2.4
-
- res = [[]]
- for i in range(ndims):
- newres = []
- for elem in ind:
- for others in res:
- newres.append([elem] + others)
- res = newres
-
- return res
+ return itertools.product(ind, repeat=ndims)
def _check_assignment(srcidx, dstidx):
@@ -140,11 +120,7 @@ def test_diophantine_fuzz():
# Check no solution exists (provided the problem is
# small enough so that brute force checking doesn't
# take too long)
- try:
- ranges = tuple(xrange(0, a*ub+1, a) for a, ub in zip(A, U))
- except OverflowError:
- # xrange on 32-bit Python 2 may overflow
- continue
+ ranges = tuple(range(0, a*ub+1, a) for a, ub in zip(A, U))
size = 1
for r in ranges:
@@ -410,7 +386,6 @@ def test_shares_memory_api():
assert_equal(np.shares_memory(a, b), True)
assert_equal(np.shares_memory(a, b, max_work=None), True)
assert_raises(np.TooHardError, np.shares_memory, a, b, max_work=1)
- assert_raises(np.TooHardError, np.shares_memory, a, b, max_work=long(1))
def test_may_share_memory_bad_max_work():
@@ -477,7 +452,7 @@ def check_internal_overlap(a, manual_expected=None):
# Brute-force check
m = set()
- ranges = tuple(xrange(n) for n in a.shape)
+ ranges = tuple(range(n) for n in a.shape)
for v in itertools.product(*ranges):
offset = sum(s*w for s, w in zip(a.strides, v))
if offset in m:
@@ -564,7 +539,7 @@ def test_internal_overlap_fuzz():
def test_non_ndarray_inputs():
# Regression check for gh-5604
- class MyArray(object):
+ class MyArray:
def __init__(self, data):
self.data = data
@@ -572,7 +547,7 @@ def test_non_ndarray_inputs():
def __array_interface__(self):
return self.data.__array_interface__
- class MyArray2(object):
+ class MyArray2:
def __init__(self, data):
self.data = data
@@ -619,7 +594,7 @@ def assert_copy_equivalent(operation, args, out, **kwargs):
assert_equal(got, expected)
-class TestUFunc(object):
+class TestUFunc:
"""
Test ufunc call memory overlap handling
"""
@@ -748,6 +723,7 @@ class TestUFunc(object):
a = np.arange(10000, dtype=np.int16)
check(np.add, a, a[::-1], a)
+ @pytest.mark.slow
def test_unary_gufunc_fuzz(self):
shapes = [7, 13, 8, 21, 29, 32]
gufunc = _umath_tests.euclidean_pdist
diff --git a/numpy/core/tests/test_memmap.py b/numpy/core/tests/test_memmap.py
index d2ae564b2..feef80ce8 100644
--- a/numpy/core/tests/test_memmap.py
+++ b/numpy/core/tests/test_memmap.py
@@ -1,22 +1,20 @@
-from __future__ import division, absolute_import, print_function
-
import sys
import os
import shutil
import mmap
import pytest
+from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryFile, mktemp, mkdtemp
from numpy import (
memmap, sum, average, product, ndarray, isscalar, add, subtract, multiply)
-from numpy.compat import Path
from numpy import arange, allclose, asarray
from numpy.testing import (
assert_, assert_equal, assert_array_equal, suppress_warnings
)
-class TestMemmap(object):
+class TestMemmap:
def setup(self):
self.tmpfp = NamedTemporaryFile(prefix='mmap')
self.tempdir = mkdtemp()
@@ -76,7 +74,6 @@ class TestMemmap(object):
del b
del fp
- @pytest.mark.skipif(Path is None, reason="No pathlib.Path")
def test_path(self):
tmpname = mktemp('', 'mmap', dir=self.tempdir)
fp = memmap(Path(tmpname), dtype=self.dtype, mode='w+',
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index 9b124f603..4e2d2ad41 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -1,11 +1,4 @@
-from __future__ import division, absolute_import, print_function
-
-try:
- # Accessing collections abstract classes from collections
- # has been deprecated since Python 3.3
- import collections.abc as collections_abc
-except ImportError:
- import collections as collections_abc
+import collections.abc
import tempfile
import sys
import shutil
@@ -23,22 +16,12 @@ from contextlib import contextmanager
from numpy.compat import pickle
-try:
- import pathlib
-except ImportError:
- try:
- import pathlib2 as pathlib
- except ImportError:
- pathlib = None
-
-if sys.version_info[0] >= 3:
- import builtins
-else:
- import __builtin__ as builtins
+import pathlib
+import builtins
from decimal import Decimal
import numpy as np
-from numpy.compat import strchar, unicode
+from numpy.compat import strchar
import numpy.core._multiarray_tests as _multiarray_tests
from numpy.testing import (
assert_, assert_raises, assert_warns, assert_equal, assert_almost_equal,
@@ -46,21 +29,13 @@ from numpy.testing import (
assert_allclose, IS_PYPY, HAS_REFCOUNT, assert_array_less, runstring,
temppath, suppress_warnings, break_cycles,
)
+from numpy.testing._private.utils import _no_tracing
from numpy.core.tests._locales import CommaDecimalPointLocale
# Need to test an object that does not fully implement math interface
from datetime import timedelta, datetime
-if sys.version_info[:2] > (3, 2):
- # In Python 3.3 the representation of empty shape, strides and sub-offsets
- # is an empty tuple instead of None.
- # https://docs.python.org/dev/whatsnew/3.3.html#api-changes
- EMPTY = ()
-else:
- EMPTY = None
-
-
def _aligned_zeros(shape, dtype=float, order="C", align=None):
"""
Allocate a new ndarray with aligned memory.
@@ -97,7 +72,7 @@ def _aligned_zeros(shape, dtype=float, order="C", align=None):
return data
-class TestFlags(object):
+class TestFlags:
def setup(self):
self.a = np.arange(10)
@@ -171,7 +146,6 @@ class TestFlags(object):
vals.setflags(write=True)
assert_(vals.flags.writeable)
- @pytest.mark.skipif(sys.version_info[0] < 3, reason="Python 2 always copies")
@pytest.mark.skipif(IS_PYPY, reason="PyPy always copies")
def test_writeable_pickle(self):
import pickle
@@ -265,7 +239,7 @@ class TestFlags(object):
assert_(a.flags.aligned)
-class TestHash(object):
+class TestHash:
# see #3793
def test_int(self):
for st, ut, s in [(np.int8, np.uint8, 8),
@@ -287,7 +261,7 @@ class TestHash(object):
err_msg="%r: 2**%d - 1" % (ut, i))
-class TestAttributes(object):
+class TestAttributes:
def setup(self):
self.one = np.arange(10)
self.two = np.arange(20).reshape(4, 5)
@@ -328,17 +302,8 @@ class TestAttributes(object):
numpy_int = np.int_(0)
- if sys.version_info[0] >= 3:
- # On Py3k int_ should not inherit from int, because it's not
- # fixed-width anymore
- assert_equal(isinstance(numpy_int, int), False)
- else:
- # Otherwise, it should inherit from int...
- assert_equal(isinstance(numpy_int, int), True)
-
- # ... and fast-path checks on C-API level should also work
- from numpy.core._multiarray_tests import test_int_subclass
- assert_equal(test_int_subclass(numpy_int), True)
+ # int_ doesn't inherit from Python int, because it's not fixed-width
+ assert_(not isinstance(numpy_int, int))
def test_stridesattr(self):
x = self.one
@@ -391,6 +356,11 @@ class TestAttributes(object):
a.strides = 1
a[::2].strides = 2
+ # test 0d
+ arr_0d = np.array(0)
+ arr_0d.strides = ()
+ assert_raises(TypeError, set_strides, arr_0d, None)
+
def test_fill(self):
for t in "?bhilqpBHILQPfdgFDGO":
x = np.empty((3, 2, 1), t)
@@ -420,7 +390,7 @@ class TestAttributes(object):
assert_array_equal(x['b'], [-2, -2])
-class TestArrayConstruction(object):
+class TestArrayConstruction:
def test_array(self):
d = np.ones(6)
r = np.array([d, d])
@@ -447,7 +417,7 @@ class TestArrayConstruction(object):
assert_equal(r, np.ones((2, 6, 6)))
d = np.ones((6, ))
- r = np.array([[d, d + 1], d + 2])
+ r = np.array([[d, d + 1], d + 2], dtype=object)
assert_equal(len(r), 2)
assert_equal(r[0], [d, d + 1])
assert_equal(r[1], d + 2)
@@ -498,7 +468,7 @@ class TestArrayConstruction(object):
assert_(np.asfortranarray(d).flags.f_contiguous)
-class TestAssignment(object):
+class TestAssignment:
def test_assignment_broadcasting(self):
a = np.arange(6).reshape(2, 3)
@@ -564,7 +534,7 @@ class TestAssignment(object):
u = np.array([u'done'])
b = np.array([b'done'])
- class bad_sequence(object):
+ class bad_sequence:
def __getitem__(self): pass
def __len__(self): raise RuntimeError
@@ -615,7 +585,7 @@ class TestAssignment(object):
assert_equal(a[0], b"1.1234567890123457")
-class TestDtypedescr(object):
+class TestDtypedescr:
def test_construction(self):
d1 = np.dtype('i4')
assert_equal(d1, np.dtype(np.int32))
@@ -637,7 +607,7 @@ class TestDtypedescr(object):
"array([0, 0, 0, 0], dtype=(numpy.int32, [('a', '<i2'), ('b', '<i2')]))")
-class TestZeroRank(object):
+class TestZeroRank:
def setup(self):
self.d = np.array(0), np.array('x', object)
@@ -714,6 +684,12 @@ class TestZeroRank(object):
y[()] = 6
assert_equal(x[()], 6)
+ # strides and shape must be the same length
+ with pytest.raises(ValueError):
+ np.ndarray((2,), strides=())
+ with pytest.raises(ValueError):
+ np.ndarray((), strides=(2,))
+
def test_output(self):
x = np.array(2)
assert_raises(ValueError, np.add, x, [1], x)
@@ -735,7 +711,7 @@ class TestZeroRank(object):
assert_equal(xi.flags.f_contiguous, True)
-class TestScalarIndexing(object):
+class TestScalarIndexing:
def setup(self):
self.d = np.array([0, 1])[0]
@@ -831,12 +807,12 @@ class TestScalarIndexing(object):
assert_equal(a, [0, 1, 0, 1, 2])
-class TestCreation(object):
+class TestCreation:
"""
Test the np.array constructor
"""
def test_from_attribute(self):
- class x(object):
+ class x:
def __array__(self, dtype=None):
pass
@@ -962,36 +938,6 @@ class TestCreation(object):
assert_equal(np.array([[1j, 1j],[1, 1]]).dtype, complex)
assert_equal(np.array([[1, 1, 1],[1, 1j, 1.], [1, 1, 1]]).dtype, complex)
- @pytest.mark.skipif(sys.version_info[0] >= 3, reason="Not Python 2")
- def test_sequence_long(self):
- assert_equal(np.array([long(4), long(4)]).dtype, np.long)
- assert_equal(np.array([long(4), 2**80]).dtype, object)
- assert_equal(np.array([long(4), 2**80, long(4)]).dtype, object)
- assert_equal(np.array([2**80, long(4)]).dtype, object)
-
- def test_sequence_of_array_like(self):
- class ArrayLike:
- def __init__(self):
- self.__array_interface__ = {
- "shape": (42,),
- "typestr": "<i1",
- "data": bytes(42)
- }
-
- # Make sure __array_*__ is used instead of Sequence methods.
- def __iter__(self):
- raise AssertionError("__iter__ was called")
-
- def __getitem__(self, idx):
- raise AssertionError("__getitem__ was called")
-
- def __len__(self):
- return 42
-
- assert_equal(
- np.array([ArrayLike()]),
- np.zeros((1, 42), dtype=np.byte))
-
def test_non_sequence_sequence(self):
"""Should not segfault.
@@ -1001,14 +947,14 @@ class TestCreation(object):
of an error in the Fail case.
"""
- class Fail(object):
+ class Fail:
def __len__(self):
return 1
def __getitem__(self, index):
raise ValueError()
- class Map(object):
+ class Map:
def __len__(self):
return 1
@@ -1046,7 +992,7 @@ class TestCreation(object):
def test_failed_len_sequence(self):
# gh-7393
- class A(object):
+ class A:
def __init__(self, data):
self._data = data
def __getitem__(self, item):
@@ -1073,36 +1019,62 @@ class TestCreation(object):
assert_raises(ValueError, np.ndarray, buffer=buf, strides=(0,),
shape=(max_bytes//itemsize + 1,), dtype=dtype)
- def test_jagged_ndim_object(self):
- # Lists of mismatching depths are treated as object arrays
- a = np.array([[1], 2, 3])
- assert_equal(a.shape, (3,))
- assert_equal(a.dtype, object)
+ def _ragged_creation(self, seq):
+ # without dtype=object, the ragged object should raise
+ with assert_warns(np.VisibleDeprecationWarning):
+ a = np.array(seq)
+ b = np.array(seq, dtype=object)
+ assert_equal(a, b)
+ return b
- a = np.array([1, [2], 3])
+ def test_ragged_ndim_object(self):
+ # Lists of mismatching depths are treated as object arrays
+ a = self._ragged_creation([[1], 2, 3])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
- a = np.array([1, 2, [3]])
+ a = self._ragged_creation([1, [2], 3])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
- def test_jagged_shape_object(self):
- # The jagged dimension of a list is turned into an object array
- a = np.array([[1, 1], [2], [3]])
+ a = self._ragged_creation([1, 2, [3]])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
- a = np.array([[1], [2, 2], [3]])
+ def test_ragged_shape_object(self):
+ # The ragged dimension of a list is turned into an object array
+ a = self._ragged_creation([[1, 1], [2], [3]])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
- a = np.array([[1], [2], [3, 3]])
+ a = self._ragged_creation([[1], [2, 2], [3]])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
-
-class TestStructured(object):
+ a = self._ragged_creation([[1], [2], [3, 3]])
+ assert a.shape == (3,)
+ assert a.dtype == object
+
+ def test_array_of_ragged_array(self):
+ outer = np.array([None, None])
+ outer[0] = outer[1] = np.array([1, 2, 3])
+ assert np.array(outer).shape == (2,)
+ assert np.array([outer]).shape == (1, 2)
+
+ outer_ragged = np.array([None, None])
+ outer_ragged[0] = np.array([1, 2, 3])
+ outer_ragged[1] = np.array([1, 2, 3, 4])
+ # should both of these emit deprecation warnings?
+ assert np.array(outer_ragged).shape == (2,)
+ assert np.array([outer_ragged]).shape == (1, 2,)
+
+ def test_deep_nonragged_object(self):
+ # None of these should raise, even though they are missing dtype=object
+ a = np.array([[[Decimal(1)]]])
+ a = np.array([1, Decimal(1)])
+ a = np.array([[1], [Decimal(1)]])
+
+class TestStructured:
def test_subarray_field_access(self):
a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))])
a['a'] = np.arange(60).reshape(3, 5, 2, 2)
@@ -1390,7 +1362,7 @@ class TestStructured(object):
a[['b','c']] # no exception
-class TestBool(object):
+class TestBool:
def test_test_interning(self):
a0 = np.bool_(0)
b0 = np.bool_(False)
@@ -1480,7 +1452,7 @@ class TestBool(object):
self._test_cast_from_flexible(np.bytes_)
-class TestZeroSizeFlexible(object):
+class TestZeroSizeFlexible:
@staticmethod
def _zeros(shape, dtype=str):
dtype = np.dtype(dtype)
@@ -1496,12 +1468,12 @@ class TestZeroSizeFlexible(object):
assert_equal(zs.itemsize, 0)
zs = self._zeros(10, np.void)
assert_equal(zs.itemsize, 0)
- zs = self._zeros(10, unicode)
+ zs = self._zeros(10, str)
assert_equal(zs.itemsize, 0)
def _test_sort_partition(self, name, kinds, **kwargs):
# Previously, these would all hang
- for dt in [bytes, np.void, unicode]:
+ for dt in [bytes, np.void, str]:
zs = self._zeros(10, dt)
sort_method = getattr(zs, name)
sort_func = getattr(np, name)
@@ -1523,13 +1495,13 @@ class TestZeroSizeFlexible(object):
def test_resize(self):
# previously an error
- for dt in [bytes, np.void, unicode]:
+ for dt in [bytes, np.void, str]:
zs = self._zeros(10, dt)
zs.resize(25)
zs.resize((10, 10))
def test_view(self):
- for dt in [bytes, np.void, unicode]:
+ for dt in [bytes, np.void, str]:
zs = self._zeros(10, dt)
# viewing as itself should be allowed
@@ -1544,7 +1516,7 @@ class TestZeroSizeFlexible(object):
def test_pickle(self):
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
- for dt in [bytes, np.void, unicode]:
+ for dt in [bytes, np.void, str]:
zs = self._zeros(10, dt)
p = pickle.dumps(zs, protocol=proto)
zs2 = pickle.loads(p)
@@ -1567,7 +1539,7 @@ class TestZeroSizeFlexible(object):
assert array_from_buffer[0] == -1, array_from_buffer[0]
-class TestMethods(object):
+class TestMethods:
sort_kinds = ['quicksort', 'heapsort', 'stable']
@@ -1729,53 +1701,60 @@ class TestMethods(object):
b = np.sort(a)
assert_equal(b, a[::-1], msg)
- # all c scalar sorts use the same code with different types
- # so it suffices to run a quick check with one type. The number
- # of sorted items must be greater than ~50 to check the actual
- # algorithm because quick and merge sort fall over to insertion
- # sort for small arrays.
- # Test unsigned dtypes and nonnegative numbers
- for dtype in [np.uint8, np.uint16, np.uint32, np.uint64, np.float16, np.float32, np.float64, np.longdouble]:
- a = np.arange(101, dtype=dtype)
- b = a[::-1].copy()
- for kind in self.sort_kinds:
- msg = "scalar sort, kind=%s, dtype=%s" % (kind, dtype)
- c = a.copy()
- c.sort(kind=kind)
- assert_equal(c, a, msg)
- c = b.copy()
- c.sort(kind=kind)
- assert_equal(c, a, msg)
-
- # Test signed dtypes and negative numbers as well
- for dtype in [np.int8, np.int16, np.int32, np.int64, np.float16, np.float32, np.float64, np.longdouble]:
- a = np.arange(-50, 51, dtype=dtype)
- b = a[::-1].copy()
- for kind in self.sort_kinds:
- msg = "scalar sort, kind=%s, dtype=%s" % (kind, dtype)
- c = a.copy()
- c.sort(kind=kind)
- assert_equal(c, a, msg)
- c = b.copy()
- c.sort(kind=kind)
- assert_equal(c, a, msg)
+ # all c scalar sorts use the same code with different types
+ # so it suffices to run a quick check with one type. The number
+ # of sorted items must be greater than ~50 to check the actual
+ # algorithm because quick and merge sort fall over to insertion
+ # sort for small arrays.
+
+ @pytest.mark.parametrize('dtype', [np.uint8, np.uint16, np.uint32, np.uint64,
+ np.float16, np.float32, np.float64,
+ np.longdouble])
+ def test_sort_unsigned(self, dtype):
+ a = np.arange(101, dtype=dtype)
+ b = a[::-1].copy()
+ for kind in self.sort_kinds:
+ msg = "scalar sort, kind=%s" % kind
+ c = a.copy()
+ c.sort(kind=kind)
+ assert_equal(c, a, msg)
+ c = b.copy()
+ c.sort(kind=kind)
+ assert_equal(c, a, msg)
- # test complex sorts. These use the same code as the scalars
- # but the compare function differs.
- ai = a*1j + 1
- bi = b*1j + 1
+ @pytest.mark.parametrize('dtype',
+ [np.int8, np.int16, np.int32, np.int64, np.float16,
+ np.float32, np.float64, np.longdouble])
+ def test_sort_signed(self, dtype):
+ a = np.arange(-50, 51, dtype=dtype)
+ b = a[::-1].copy()
for kind in self.sort_kinds:
- msg = "complex sort, real part == 1, kind=%s" % kind
- c = ai.copy()
+ msg = "scalar sort, kind=%s" % (kind)
+ c = a.copy()
c.sort(kind=kind)
- assert_equal(c, ai, msg)
- c = bi.copy()
+ assert_equal(c, a, msg)
+ c = b.copy()
c.sort(kind=kind)
- assert_equal(c, ai, msg)
- ai = a + 1j
- bi = b + 1j
+ assert_equal(c, a, msg)
+
+ @pytest.mark.parametrize('dtype', [np.float32, np.float64, np.longdouble])
+ @pytest.mark.parametrize('part', ['real', 'imag'])
+ def test_sort_complex(self, part, dtype):
+ # test complex sorts. These use the same code as the scalars
+ # but the compare function differs.
+ cdtype = {
+ np.single: np.csingle,
+ np.double: np.cdouble,
+ np.longdouble: np.clongdouble,
+ }[dtype]
+ a = np.arange(-50, 51, dtype=dtype)
+ b = a[::-1].copy()
+ ai = (a * (1+1j)).astype(cdtype)
+ bi = (b * (1+1j)).astype(cdtype)
+ setattr(ai, part, 1)
+ setattr(bi, part, 1)
for kind in self.sort_kinds:
- msg = "complex sort, imag part == 1, kind=%s" % kind
+ msg = "complex sort, %s part == 1, kind=%s" % (part, kind)
c = ai.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
@@ -1783,6 +1762,7 @@ class TestMethods(object):
c.sort(kind=kind)
assert_equal(c, ai, msg)
+ def test_sort_complex_byte_swapping(self):
# test sorting of complex arrays requiring byte-swapping, gh-5441
for endianness in '<>':
for dt in np.typecodes['Complex']:
@@ -1792,25 +1772,13 @@ class TestMethods(object):
msg = 'byte-swapped complex sort, dtype={0}'.format(dt)
assert_equal(c, arr, msg)
- # test string sorts.
- s = 'aaaaaaaa'
- a = np.array([s + chr(i) for i in range(101)])
+ @pytest.mark.parametrize('dtype', [np.bytes_, np.unicode_])
+ def test_sort_string(self, dtype):
+ # np.array will perform the encoding to bytes for us in the bytes test
+ a = np.array(['aaaaaaaa' + chr(i) for i in range(101)], dtype=dtype)
b = a[::-1].copy()
for kind in self.sort_kinds:
- msg = "string sort, kind=%s" % kind
- c = a.copy()
- c.sort(kind=kind)
- assert_equal(c, a, msg)
- c = b.copy()
- c.sort(kind=kind)
- assert_equal(c, a, msg)
-
- # test unicode sorts.
- s = 'aaaaaaaa'
- a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
- b = a[::-1].copy()
- for kind in self.sort_kinds:
- msg = "unicode sort, kind=%s" % kind
+ msg = "kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
@@ -1818,37 +1786,27 @@ class TestMethods(object):
c.sort(kind=kind)
assert_equal(c, a, msg)
+ def test_sort_object(self):
# test object array sorts.
a = np.empty((101,), dtype=object)
a[:] = list(range(101))
b = a[::-1]
for kind in ['q', 'h', 'm']:
- msg = "object sort, kind=%s" % kind
+ msg = "kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
-
+
+ def test_sort_structured(self):
# test record array sorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
for kind in ['q', 'h', 'm']:
- msg = "object sort, kind=%s" % kind
- c = a.copy()
- c.sort(kind=kind)
- assert_equal(c, a, msg)
- c = b.copy()
- c.sort(kind=kind)
- assert_equal(c, a, msg)
-
- # test datetime64 sorts.
- a = np.arange(0, 101, dtype='datetime64[D]')
- b = a[::-1]
- for kind in ['q', 'h', 'm']:
- msg = "datetime64 sort, kind=%s" % kind
+ msg = "kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
@@ -1856,11 +1814,13 @@ class TestMethods(object):
c.sort(kind=kind)
assert_equal(c, a, msg)
- # test timedelta64 sorts.
- a = np.arange(0, 101, dtype='timedelta64[D]')
+ @pytest.mark.parametrize('dtype', ['datetime64[D]', 'timedelta64[D]'])
+ def test_sort_time(self, dtype):
+ # test datetime64 and timedelta64 sorts.
+ a = np.arange(0, 101, dtype=dtype)
b = a[::-1]
for kind in ['q', 'h', 'm']:
- msg = "timedelta64 sort, kind=%s" % kind
+ msg = "kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
@@ -1868,6 +1828,7 @@ class TestMethods(object):
c.sort(kind=kind)
assert_equal(c, a, msg)
+ def test_sort_axis(self):
# check axis handling. This should be the same for all type
# specific sorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
@@ -1883,6 +1844,7 @@ class TestMethods(object):
d.sort()
assert_equal(d, c, "test sort with default axis")
+ def test_sort_size_0(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
@@ -1892,16 +1854,19 @@ class TestMethods(object):
msg = 'test empty array sort with axis=None'
assert_equal(np.sort(a, axis=None), a.ravel(), msg)
+ def test_sort_bad_ordering(self):
# test generic class with bogus ordering,
# should not segfault.
- class Boom(object):
+ class Boom:
def __lt__(self, other):
return True
- a = np.array([Boom()]*100, dtype=object)
+ a = np.array([Boom()] * 100, dtype=object)
for kind in self.sort_kinds:
- msg = "bogus comparison object sort, kind=%s" % kind
+ msg = "kind=%s" % kind
+ c = a.copy()
c.sort(kind=kind)
+ assert_equal(c, a, msg)
def test_void_sort(self):
# gh-8210 - previously segfaulted
@@ -1922,7 +1887,7 @@ class TestMethods(object):
for kind in self.sort_kinds:
assert_raises(TypeError, arr.sort, kind=kind)
#gh-3879
- class Raiser(object):
+ class Raiser:
def raises_anything(*args, **kwargs):
raise TypeError("SOMETHING ERRORED")
__eq__ = __ne__ = __lt__ = __gt__ = __ge__ = __le__ = raises_anything
@@ -2059,7 +2024,7 @@ class TestMethods(object):
# test unicode argsorts.
s = 'aaaaaaaa'
- a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
+ a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode_)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
@@ -2142,7 +2107,7 @@ class TestMethods(object):
a = np.array(['aaaaaaaaa' for i in range(100)])
assert_equal(a.argsort(kind='m'), r)
# unicode
- a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode)
+ a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode_)
assert_equal(a.argsort(kind='m'), r)
def test_sort_unicode_kind(self):
@@ -2166,6 +2131,8 @@ class TestMethods(object):
msg = "Test real searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 4), msg)
+ # check keyword arguments
+ a.searchsorted(v=1)
# check double complex
a = np.zeros(9, dtype=np.complex128)
a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan]
@@ -2271,17 +2238,18 @@ class TestMethods(object):
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100197_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100198_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100199_1'],
- dtype=np.unicode)
+ dtype=np.unicode_)
ind = np.arange(len(a))
assert_equal([a.searchsorted(v, 'left') for v in a], ind)
assert_equal([a.searchsorted(v, 'right') for v in a], ind + 1)
assert_equal([a.searchsorted(a[i], 'left') for i in ind], ind)
assert_equal([a.searchsorted(a[i], 'right') for i in ind], ind + 1)
- def test_searchsorted_with_sorter(self):
+ def test_searchsorted_with_invalid_sorter(self):
a = np.array([5, 2, 1, 3, 4])
s = np.argsort(a)
- assert_raises(TypeError, np.searchsorted, a, 0, sorter=(1, (2, 3)))
+ assert_raises(TypeError, np.searchsorted, a, 0,
+ sorter=np.array((1, (2, 3)), dtype=object))
assert_raises(TypeError, np.searchsorted, a, 0, sorter=[1.1])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4, 5, 6])
@@ -2291,6 +2259,7 @@ class TestMethods(object):
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[-1, 0, 1, 2, 3])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[4, 0, -1, 2, 3])
+ def test_searchsorted_with_sorter(self):
a = np.random.rand(300)
s = a.argsort()
b = np.sort(a)
@@ -2964,7 +2933,7 @@ class TestMethods(object):
def test_dot_matmul_inner_array_casting_fails(self):
- class A(object):
+ class A:
def __array__(self, *args, **kwargs):
raise NotImplementedError
@@ -3325,12 +3294,12 @@ class TestMethods(object):
e = np.array(['1+1j'], 'U')
assert_raises(TypeError, complex, e)
-class TestCequenceMethods(object):
+class TestCequenceMethods:
def test_array_contains(self):
assert_(4.0 in np.arange(16.).reshape(4,4))
assert_(20.0 not in np.arange(16.).reshape(4,4))
-class TestBinop(object):
+class TestBinop:
def test_inplace(self):
# test refcount 1 inplace conversion
assert_array_almost_equal(np.array([0.5]) * np.array([1.0, 2.0]),
@@ -3385,6 +3354,7 @@ class TestBinop(object):
'and': (np.bitwise_and, True, int),
'xor': (np.bitwise_xor, True, int),
'or': (np.bitwise_or, True, int),
+ 'matmul': (np.matmul, False, float),
# 'ge': (np.less_equal, False),
# 'gt': (np.less, False),
# 'le': (np.greater_equal, False),
@@ -3392,8 +3362,6 @@ class TestBinop(object):
# 'eq': (np.equal, False),
# 'ne': (np.not_equal, False),
}
- if sys.version_info >= (3, 5):
- ops['matmul'] = (np.matmul, False, float)
class Coerced(Exception):
pass
@@ -3555,7 +3523,7 @@ class TestBinop(object):
def test_ufunc_override_normalize_signature(self):
# gh-5674
- class SomeClass(object):
+ class SomeClass:
def __array_ufunc__(self, ufunc, method, *inputs, **kw):
return kw
@@ -3573,7 +3541,7 @@ class TestBinop(object):
# Check that index is set appropriately, also if only an output
# is passed on (latter is another regression tests for github bug 4753)
# This also checks implicitly that 'out' is always a tuple.
- class CheckIndex(object):
+ class CheckIndex:
def __array_ufunc__(self, ufunc, method, *inputs, **kw):
for i, a in enumerate(inputs):
if a is self:
@@ -3602,10 +3570,10 @@ class TestBinop(object):
assert_equal(np.modf(dummy, out=(None, a)), (1,))
assert_equal(np.modf(dummy, out=(dummy, a)), (1,))
assert_equal(np.modf(a, out=(dummy, a)), 0)
- with warnings.catch_warnings(record=True) as w:
- warnings.filterwarnings('always', '', DeprecationWarning)
- assert_equal(np.modf(dummy, out=a), (0,))
- assert_(w[0].category is DeprecationWarning)
+ with assert_raises(TypeError):
+ # Out argument must be tuple, since there are multiple outputs
+ np.modf(dummy, out=a)
+
assert_raises(ValueError, np.modf, dummy, out=(a,))
# 2 inputs, 1 output
@@ -3661,7 +3629,7 @@ class TestBinop(object):
def test_pow_array_object_dtype(self):
# test pow on arrays of object dtype
- class SomeClass(object):
+ class SomeClass:
def __init__(self, num=None):
self.num = num
@@ -3702,7 +3670,7 @@ class TestBinop(object):
+tst
-class TestTemporaryElide(object):
+class TestTemporaryElide:
# elision is only triggered on relatively large arrays
def test_extension_incref_elide(self):
@@ -3804,7 +3772,7 @@ class TestTemporaryElide(object):
assert_equal(a, 1)
-class TestCAPI(object):
+class TestCAPI:
def test_IsPythonScalar(self):
from numpy.core._multiarray_tests import IsPythonScalar
assert_(IsPythonScalar(b'foobar'))
@@ -3814,16 +3782,14 @@ class TestCAPI(object):
assert_(IsPythonScalar("a"))
-class TestSubscripting(object):
+class TestSubscripting:
def test_test_zero_rank(self):
x = np.array([1, 2, 3])
assert_(isinstance(x[0], np.int_))
- if sys.version_info[0] < 3:
- assert_(isinstance(x[0], int))
assert_(type(x[0, ...]) is np.ndarray)
-class TestPickling(object):
+class TestPickling:
@pytest.mark.skipif(pickle.HIGHEST_PROTOCOL >= 5,
reason=('this tests the error messages when trying to'
'protocol 5 although it is not available'))
@@ -3918,10 +3884,7 @@ class TestPickling(object):
assert ref() is None
def _loads(self, obj):
- if sys.version_info[0] >= 3:
- return pickle.loads(obj, encoding='latin1')
- else:
- return pickle.loads(obj)
+ return pickle.loads(obj, encoding='latin1')
# version 0 pickles, using protocol=2 to pickle
# version 0 doesn't have a version field
@@ -3980,7 +3943,7 @@ class TestPickling(object):
assert_equal(original.dtype, new.dtype)
-class TestFancyIndexing(object):
+class TestFancyIndexing:
def test_list(self):
x = np.ones((1, 1))
x[:, [0]] = 2.0
@@ -4034,7 +3997,7 @@ class TestFancyIndexing(object):
assert_array_equal(x, np.array([[1, 10, 3, 4], [5, 6, 7, 8]]))
-class TestStringCompare(object):
+class TestStringCompare:
def test_string(self):
g1 = np.array(["This", "is", "example"])
g2 = np.array(["This", "was", "example"])
@@ -4066,7 +4029,7 @@ class TestStringCompare(object):
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
-class TestArgmax(object):
+class TestArgmax:
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
@@ -4105,17 +4068,17 @@ class TestArgmax(object):
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
- np.datetime64('1932-10-10T03:50:30')], 4),
+ np.datetime64('1932-10-10T03:50:30')], 0),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
- np.datetime64('2013-05-08T18:15:23')], 0),
+ np.datetime64('2013-05-08T18:15:23')], 2),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
- np.timedelta64(3, 's')], 3),
+ np.timedelta64(3, 's')], 2),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
@@ -4201,7 +4164,7 @@ class TestArgmax(object):
assert_equal(a.argmax(), 1)
-class TestArgmin(object):
+class TestArgmin:
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
@@ -4240,17 +4203,17 @@ class TestArgmin(object):
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
- np.datetime64('1932-10-10T03:50:30')], 5),
+ np.datetime64('1932-10-10T03:50:30')], 0),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
- np.datetime64('2013-05-08T18:15:23')], 4),
+ np.datetime64('2013-05-08T18:15:23')], 2),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
- np.timedelta64(3, 's')], 1),
+ np.timedelta64(3, 's')], 2),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
@@ -4350,7 +4313,7 @@ class TestArgmin(object):
assert_equal(a.argmin(), 1)
-class TestMinMax(object):
+class TestMinMax:
def test_scalar(self):
assert_raises(np.AxisError, np.amax, 1, 1)
@@ -4366,28 +4329,24 @@ class TestMinMax(object):
assert_equal(np.amax([[1, 2, 3]], axis=1), 3)
def test_datetime(self):
- # NaTs are ignored
+ # Do not ignore NaT
for dtype in ('m8[s]', 'm8[Y]'):
a = np.arange(10).astype(dtype)
- a[3] = 'NaT'
assert_equal(np.amin(a), a[0])
assert_equal(np.amax(a), a[9])
- a[0] = 'NaT'
- assert_equal(np.amin(a), a[1])
- assert_equal(np.amax(a), a[9])
- a.fill('NaT')
- assert_equal(np.amin(a), a[0])
- assert_equal(np.amax(a), a[0])
+ a[3] = 'NaT'
+ assert_equal(np.amin(a), a[3])
+ assert_equal(np.amax(a), a[3])
-class TestNewaxis(object):
+class TestNewaxis:
def test_basic(self):
sk = np.array([0, -0.1, 0.1])
res = 250*sk[:, np.newaxis]
assert_almost_equal(res.ravel(), 250*sk)
-class TestClip(object):
+class TestClip:
def _check_range(self, x, cmin, cmax):
assert_(np.all(x >= cmin))
assert_(np.all(x <= cmax))
@@ -4465,7 +4424,7 @@ class TestClip(object):
assert_array_equal(result, expected)
-class TestCompress(object):
+class TestCompress:
def test_axis(self):
tgt = [[5, 6, 7, 8, 9]]
arr = np.arange(10).reshape(2, 5)
@@ -4488,14 +4447,13 @@ class TestCompress(object):
assert_equal(out, 1)
-class TestPutmask(object):
+class TestPutmask:
def tst_basic(self, x, T, mask, val):
np.putmask(x, mask, val)
- assert_equal(x[mask], T(val))
- assert_equal(x.dtype, T)
+ assert_equal(x[mask], np.array(val, T))
def test_ip_types(self):
- unchecked_types = [bytes, unicode, np.void, object]
+ unchecked_types = [bytes, str, np.void]
x = np.random.random(1000)*100
mask = x < 40
@@ -4506,6 +4464,10 @@ class TestPutmask(object):
if T not in unchecked_types:
self.tst_basic(x.copy().astype(T), T, mask, val)
+ # Also test string of a length which uses an untypical length
+ dt = np.dtype("S3")
+ self.tst_basic(x.astype(dt), dt.type, mask, dt.type(val)[:3])
+
def test_mask_size(self):
assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5)
@@ -4539,13 +4501,13 @@ class TestPutmask(object):
assert_equal(x, np.array([True, True, True, True]))
-class TestTake(object):
+class TestTake:
def tst_basic(self, x):
ind = list(range(x.shape[0]))
assert_array_equal(x.take(ind, axis=0), x)
def test_ip_types(self):
- unchecked_types = [bytes, unicode, np.void, object]
+ unchecked_types = [bytes, str, np.void]
x = np.random.random(24)*100
x.shape = 2, 3, 4
@@ -4554,6 +4516,9 @@ class TestTake(object):
if T not in unchecked_types:
self.tst_basic(x.copy().astype(T))
+ # Also test string of a length which uses an untypical length
+ self.tst_basic(x.astype("S3"))
+
def test_raise(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
@@ -4592,7 +4557,7 @@ class TestTake(object):
y = np.take(x, [1, 2, 3], out=x[2:5], mode='wrap')
assert_equal(y, np.array([1, 2, 3]))
-class TestLexsort(object):
+class TestLexsort:
@pytest.mark.parametrize('dtype',[
np.uint8, np.uint16, np.uint32, np.uint64,
np.int8, np.int16, np.int32, np.int64,
@@ -4647,7 +4612,7 @@ class TestLexsort(object):
x = np.linspace(0., 1., 42*3).reshape(42, 3)
assert_raises(np.AxisError, np.lexsort, x, axis=2)
-class TestIO(object):
+class TestIO:
"""Test tofile, fromfile, tobytes, and fromstring"""
def setup(self):
@@ -4688,25 +4653,23 @@ class TestIO(object):
assert_array_equal(d, e)
def test_empty_files_binary(self):
- f = open(self.filename, 'w')
- f.close()
+ with open(self.filename, 'w') as f:
+ pass
y = np.fromfile(self.filename)
assert_(y.size == 0, "Array not empty")
def test_empty_files_text(self):
- f = open(self.filename, 'w')
- f.close()
+ with open(self.filename, 'wb') as f:
+ pass
y = np.fromfile(self.filename, sep=" ")
assert_(y.size == 0, "Array not empty")
def test_roundtrip_file(self):
- f = open(self.filename, 'wb')
- self.x.tofile(f)
- f.close()
+ with open(self.filename, 'wb') as f:
+ self.x.tofile(f)
# NB. doesn't work with flush+seek, due to use of C stdio
- f = open(self.filename, 'rb')
- y = np.fromfile(f, dtype=self.dtype)
- f.close()
+ with open(self.filename, 'rb') as f:
+ y = np.fromfile(f, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
def test_roundtrip_filename(self):
@@ -4714,14 +4677,12 @@ class TestIO(object):
y = np.fromfile(self.filename, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
- @pytest.mark.skipif(pathlib is None, reason="pathlib not found")
def test_roundtrip_pathlib(self):
p = pathlib.Path(self.filename)
self.x.tofile(p)
y = np.fromfile(p, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
- @pytest.mark.skipif(pathlib is None, reason="pathlib not found")
def test_roundtrip_dump_pathlib(self):
p = pathlib.Path(self.filename)
self.x.dump(p)
@@ -4805,19 +4766,17 @@ class TestIO(object):
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
- f = open(self.filename, 'wb')
- f.seek(size-1)
- f.write(b'\0')
- f.close()
+ with open(self.filename, 'wb') as f:
+ f.seek(size-1)
+ f.write(b'\0')
for mode in ['rb', 'r+b']:
err_msg = "%d %s" % (size, mode)
- f = open(self.filename, mode)
- f.read(2)
- np.fromfile(f, dtype=np.float64, count=1)
- pos = f.tell()
- f.close()
+ with open(self.filename, mode) as f:
+ f.read(2)
+ np.fromfile(f, dtype=np.float64, count=1)
+ pos = f.tell()
assert_equal(pos, 10, err_msg=err_msg)
def test_file_position_after_tofile(self):
@@ -4829,22 +4788,20 @@ class TestIO(object):
for size in sizes:
err_msg = "%d" % (size,)
- f = open(self.filename, 'wb')
- f.seek(size-1)
- f.write(b'\0')
- f.seek(10)
- f.write(b'12')
- np.array([0], dtype=np.float64).tofile(f)
- pos = f.tell()
- f.close()
+ with open(self.filename, 'wb') as f:
+ f.seek(size-1)
+ f.write(b'\0')
+ f.seek(10)
+ f.write(b'12')
+ np.array([0], dtype=np.float64).tofile(f)
+ pos = f.tell()
assert_equal(pos, 10 + 2 + 8, err_msg=err_msg)
- f = open(self.filename, 'r+b')
- f.read(2)
- f.seek(0, 1) # seek between read&write required by ANSI C
- np.array([0], dtype=np.float64).tofile(f)
- pos = f.tell()
- f.close()
+ with open(self.filename, 'r+b') as f:
+ f.read(2)
+ f.seek(0, 1) # seek between read&write required by ANSI C
+ np.array([0], dtype=np.float64).tofile(f)
+ pos = f.tell()
assert_equal(pos, 10, err_msg=err_msg)
def test_load_object_array_fromfile(self):
@@ -4897,9 +4854,8 @@ class TestIO(object):
y = np.fromstring(s, **kw)
assert_array_equal(y, value)
- f = open(self.filename, 'wb')
- f.write(s)
- f.close()
+ with open(self.filename, 'wb') as f:
+ f.write(s)
y = np.fromfile(self.filename, **kw)
assert_array_equal(y, value)
@@ -4983,33 +4939,28 @@ class TestIO(object):
# can't use _check_from because fromstring can't handle True/False
v = np.array([True, False, True, False], dtype=np.bool_)
s = b'1,0,-2.3,0'
- f = open(self.filename, 'wb')
- f.write(s)
- f.close()
+ with open(self.filename, 'wb') as f:
+ f.write(s)
y = np.fromfile(self.filename, sep=',', dtype=np.bool_)
assert_(y.dtype == '?')
assert_array_equal(y, v)
def test_tofile_sep(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
- f = open(self.filename, 'w')
- x.tofile(f, sep=',')
- f.close()
- f = open(self.filename, 'r')
- s = f.read()
- f.close()
+ with open(self.filename, 'w') as f:
+ x.tofile(f, sep=',')
+ with open(self.filename, 'r') as f:
+ s = f.read()
#assert_equal(s, '1.51,2.0,3.51,4.0')
y = np.array([float(p) for p in s.split(',')])
assert_array_equal(x,y)
def test_tofile_format(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
- f = open(self.filename, 'w')
- x.tofile(f, sep=',', format='%.2f')
- f.close()
- f = open(self.filename, 'r')
- s = f.read()
- f.close()
+ with open(self.filename, 'w') as f:
+ x.tofile(f, sep=',', format='%.2f')
+ with open(self.filename, 'r') as f:
+ s = f.read()
assert_equal(s, '1.51,2.00,3.51,4.00')
def test_locale(self):
@@ -5037,7 +4988,7 @@ class TestIO(object):
assert_array_equal(x, res)
-class TestFromBuffer(object):
+class TestFromBuffer:
@pytest.mark.parametrize('byteorder', ['<', '>'])
@pytest.mark.parametrize('dtype', [float, int, complex])
def test_basic(self, byteorder, dtype):
@@ -5050,7 +5001,7 @@ class TestFromBuffer(object):
assert_array_equal(np.frombuffer(b''), np.array([]))
-class TestFlat(object):
+class TestFlat:
def setup(self):
a0 = np.arange(20.0)
a = a0.reshape(4, 5)
@@ -5122,7 +5073,9 @@ class TestFlat(object):
assert_(abs(sys.getrefcount(indtype) - rc_indtype) < 50)
-class TestResize(object):
+class TestResize:
+
+ @_no_tracing
def test_basic(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
if IS_PYPY:
@@ -5139,6 +5092,7 @@ class TestResize(object):
assert_raises(ValueError, x.resize, (5, 1))
del y # avoid pyflakes unused variable warning.
+ @_no_tracing
def test_int_shape(self):
x = np.eye(3)
if IS_PYPY:
@@ -5172,6 +5126,7 @@ class TestResize(object):
assert_raises(TypeError, np.eye(3).resize, order=1)
assert_raises(TypeError, np.eye(3).resize, refcheck='hi')
+ @_no_tracing
def test_freeform_shape(self):
x = np.eye(3)
if IS_PYPY:
@@ -5180,6 +5135,7 @@ class TestResize(object):
x.resize(3, 2, 1)
assert_(x.shape == (3, 2, 1))
+ @_no_tracing
def test_zeros_appended(self):
x = np.eye(3)
if IS_PYPY:
@@ -5189,6 +5145,7 @@ class TestResize(object):
assert_array_equal(x[0], np.eye(3))
assert_array_equal(x[1], np.zeros((3, 3)))
+ @_no_tracing
def test_obj_obj(self):
# check memory is initialized on resize, gh-4857
a = np.ones(10, dtype=[('k', object, 2)])
@@ -5215,7 +5172,7 @@ class TestResize(object):
del xref # avoid pyflakes unused variable warning.
-class TestRecord(object):
+class TestRecord:
def test_field_rename(self):
dt = np.dtype([('f', float), ('i', int)])
dt.names = ['p', 'q']
@@ -5228,7 +5185,6 @@ class TestRecord(object):
# Error raised when multiple fields have the same name
assert_raises(ValueError, test_dtype_init)
- @pytest.mark.skipif(sys.version_info[0] < 3, reason="Not Python 3")
def test_bytes_fields(self):
# Bytes are not allowed in field names and not recognized in titles
# on Py3
@@ -5244,7 +5200,6 @@ class TestRecord(object):
y = x[0]
assert_raises(IndexError, y.__getitem__, b'a')
- @pytest.mark.skipif(sys.version_info[0] < 3, reason="Not Python 3")
def test_multiple_field_name_unicode(self):
def test_dtype_unicode():
np.dtype([("\u20B9", "f8"), ("B", "f8"), ("\u20B9", "f8")])
@@ -5252,32 +5207,6 @@ class TestRecord(object):
# Error raised when multiple fields have the same name(unicode included)
assert_raises(ValueError, test_dtype_unicode)
- @pytest.mark.skipif(sys.version_info[0] >= 3, reason="Not Python 2")
- def test_unicode_field_titles(self):
- # Unicode field titles are added to field dict on Py2
- title = u'b'
- dt = np.dtype([((title, 'a'), int)])
- dt[title]
- dt['a']
- x = np.array([(1,), (2,), (3,)], dtype=dt)
- x[title]
- x['a']
- y = x[0]
- y[title]
- y['a']
-
- @pytest.mark.skipif(sys.version_info[0] >= 3, reason="Not Python 2")
- def test_unicode_field_names(self):
- # Unicode field names are converted to ascii on Python 2:
- encodable_name = u'b'
- assert_equal(np.dtype([(encodable_name, int)]).names[0], b'b')
- assert_equal(np.dtype([(('a', encodable_name), int)]).names[0], b'b')
-
- # But raises UnicodeEncodeError if it can't be encoded:
- nonencodable_name = u'\uc3bc'
- assert_raises(UnicodeEncodeError, np.dtype, [(nonencodable_name, int)])
- assert_raises(UnicodeEncodeError, np.dtype, [(('a', nonencodable_name), int)])
-
def test_fromarrays_unicode(self):
# A single name string provided to fromarrays() is allowed to be unicode
# on both Python 2 and 3:
@@ -5298,51 +5227,41 @@ class TestRecord(object):
a = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
- is_py3 = sys.version_info[0] >= 3
- if is_py3:
- funcs = (str,)
- # byte string indexing fails gracefully
- assert_raises(IndexError, a.__setitem__, b'f1', 1)
- assert_raises(IndexError, a.__getitem__, b'f1')
- assert_raises(IndexError, a['f1'].__setitem__, b'sf1', 1)
- assert_raises(IndexError, a['f1'].__getitem__, b'sf1')
- else:
- funcs = (str, unicode)
- for func in funcs:
- b = a.copy()
- fn1 = func('f1')
- b[fn1] = 1
- assert_equal(b[fn1], 1)
- fnn = func('not at all')
- assert_raises(ValueError, b.__setitem__, fnn, 1)
- assert_raises(ValueError, b.__getitem__, fnn)
- b[0][fn1] = 2
- assert_equal(b[fn1], 2)
- # Subfield
- assert_raises(ValueError, b[0].__setitem__, fnn, 1)
- assert_raises(ValueError, b[0].__getitem__, fnn)
- # Subfield
- fn3 = func('f3')
- sfn1 = func('sf1')
- b[fn3][sfn1] = 1
- assert_equal(b[fn3][sfn1], 1)
- assert_raises(ValueError, b[fn3].__setitem__, fnn, 1)
- assert_raises(ValueError, b[fn3].__getitem__, fnn)
- # multiple subfields
- fn2 = func('f2')
- b[fn2] = 3
-
- assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
- assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
- assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
+ # byte string indexing fails gracefully
+ assert_raises(IndexError, a.__setitem__, b'f1', 1)
+ assert_raises(IndexError, a.__getitem__, b'f1')
+ assert_raises(IndexError, a['f1'].__setitem__, b'sf1', 1)
+ assert_raises(IndexError, a['f1'].__getitem__, b'sf1')
+ b = a.copy()
+ fn1 = str('f1')
+ b[fn1] = 1
+ assert_equal(b[fn1], 1)
+ fnn = str('not at all')
+ assert_raises(ValueError, b.__setitem__, fnn, 1)
+ assert_raises(ValueError, b.__getitem__, fnn)
+ b[0][fn1] = 2
+ assert_equal(b[fn1], 2)
+ # Subfield
+ assert_raises(ValueError, b[0].__setitem__, fnn, 1)
+ assert_raises(ValueError, b[0].__getitem__, fnn)
+ # Subfield
+ fn3 = str('f3')
+ sfn1 = str('sf1')
+ b[fn3][sfn1] = 1
+ assert_equal(b[fn3][sfn1], 1)
+ assert_raises(ValueError, b[fn3].__setitem__, fnn, 1)
+ assert_raises(ValueError, b[fn3].__getitem__, fnn)
+ # multiple subfields
+ fn2 = str('f2')
+ b[fn2] = 3
+
+ assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
+ assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
+ assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
# non-ascii unicode field indexing is well behaved
- if not is_py3:
- pytest.skip('non ascii unicode field indexing skipped; '
- 'raises segfault on python 2.x')
- else:
- assert_raises(ValueError, a.__setitem__, u'\u03e0', 1)
- assert_raises(ValueError, a.__getitem__, u'\u03e0')
+ assert_raises(ValueError, a.__setitem__, u'\u03e0', 1)
+ assert_raises(ValueError, a.__getitem__, u'\u03e0')
def test_record_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
@@ -5377,7 +5296,7 @@ class TestRecord(object):
v[:] = (4,5)
assert_equal(a[0].item(), (4, 1, 5))
-class TestView(object):
+class TestView:
def test_basic(self):
x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)],
dtype=[('r', np.int8), ('g', np.int8),
@@ -5402,7 +5321,7 @@ def _std(a, **args):
return a.std(**args)
-class TestStats(object):
+class TestStats:
funcs = [_mean, _var, _std]
@@ -5574,6 +5493,12 @@ class TestStats(object):
# of float32.
assert_(_mean(np.ones(100000, dtype='float16')) == 1)
+ def test_mean_axis_error(self):
+ # Ensure that AxisError is raised instead of IndexError when axis is
+ # out of bounds, see gh-15817.
+ with assert_raises(np.core._exceptions.AxisError):
+ np.arange(10).mean(axis=2)
+
def test_var_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
@@ -5583,6 +5508,45 @@ class TestStats(object):
res = _var(mat, axis=axis)
assert_almost_equal(res, tgt)
+ @pytest.mark.parametrize(('complex_dtype', 'ndec'), (
+ ('complex64', 6),
+ ('complex128', 7),
+ ('clongdouble', 7),
+ ))
+ def test_var_complex_values(self, complex_dtype, ndec):
+ # Test fast-paths for every builtin complex type
+ for axis in [0, 1, None]:
+ mat = self.cmat.copy().astype(complex_dtype)
+ msqr = _mean(mat * mat.conj(), axis=axis)
+ mean = _mean(mat, axis=axis)
+ tgt = msqr - mean * mean.conjugate()
+ res = _var(mat, axis=axis)
+ assert_almost_equal(res, tgt, decimal=ndec)
+
+ def test_var_dimensions(self):
+ # _var paths for complex number introduce additions on views that
+ # increase dimensions. Ensure this generalizes to higher dims
+ mat = np.stack([self.cmat]*3)
+ for axis in [0, 1, 2, -1, None]:
+ msqr = _mean(mat * mat.conj(), axis=axis)
+ mean = _mean(mat, axis=axis)
+ tgt = msqr - mean * mean.conjugate()
+ res = _var(mat, axis=axis)
+ assert_almost_equal(res, tgt)
+
+ def test_var_complex_byteorder(self):
+ # Test that var fast-path does not cause failures for complex arrays
+ # with non-native byteorder
+ cmat = self.cmat.copy().astype('complex128')
+ cmat_swapped = cmat.astype(cmat.dtype.newbyteorder())
+ assert_almost_equal(cmat.var(), cmat_swapped.var())
+
+ def test_var_axis_error(self):
+ # Ensure that AxisError is raised instead of IndexError when axis is
+ # out of bounds, see gh-15817.
+ with assert_raises(np.core._exceptions.AxisError):
+ np.arange(10).var(axis=2)
+
def test_std_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
@@ -5609,7 +5573,7 @@ class TestStats(object):
res = dat.var(1)
assert_(res.info == dat.info)
-class TestVdot(object):
+class TestVdot:
def test_basic(self):
dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger']
dt_complex = np.typecodes['Complex']
@@ -5669,7 +5633,7 @@ class TestVdot(object):
np.vdot(a.flatten(), b.flatten()))
-class TestDot(object):
+class TestDot:
def setup(self):
np.random.seed(128)
self.A = np.random.rand(4, 2)
@@ -5771,7 +5735,7 @@ class TestDot(object):
assert_almost_equal(res, tgt, decimal=self.N)
def test_vecobject(self):
- class Vec(object):
+ class Vec:
def __init__(self, sequence=None):
if sequence is None:
sequence = []
@@ -5940,7 +5904,7 @@ class TestDot(object):
assert_dot_close(A_f_12, X_f_2, desired)
-class MatmulCommon(object):
+class MatmulCommon:
"""Common tests for '@' operator and numpy.matmul.
"""
@@ -6315,59 +6279,55 @@ class TestMatmul(MatmulCommon):
assert not np.any(c)
-if sys.version_info[:2] >= (3, 5):
- class TestMatmulOperator(MatmulCommon):
- import operator
- matmul = operator.matmul
+class TestMatmulOperator(MatmulCommon):
+ import operator
+ matmul = operator.matmul
- def test_array_priority_override(self):
+ def test_array_priority_override(self):
- class A(object):
- __array_priority__ = 1000
+ class A:
+ __array_priority__ = 1000
- def __matmul__(self, other):
- return "A"
+ def __matmul__(self, other):
+ return "A"
- def __rmatmul__(self, other):
- return "A"
+ def __rmatmul__(self, other):
+ return "A"
- a = A()
- b = np.ones(2)
- assert_equal(self.matmul(a, b), "A")
- assert_equal(self.matmul(b, a), "A")
+ a = A()
+ b = np.ones(2)
+ assert_equal(self.matmul(a, b), "A")
+ assert_equal(self.matmul(b, a), "A")
- def test_matmul_raises(self):
- assert_raises(TypeError, self.matmul, np.int8(5), np.int8(5))
- assert_raises(TypeError, self.matmul, np.void(b'abc'), np.void(b'abc'))
- assert_raises(ValueError, self.matmul, np.arange(10), np.void(b'abc'))
+ def test_matmul_raises(self):
+ assert_raises(TypeError, self.matmul, np.int8(5), np.int8(5))
+ assert_raises(TypeError, self.matmul, np.void(b'abc'), np.void(b'abc'))
+ assert_raises(ValueError, self.matmul, np.arange(10), np.void(b'abc'))
- def test_matmul_inplace():
- # It would be nice to support in-place matmul eventually, but for now
- # we don't have a working implementation, so better just to error out
- # and nudge people to writing "a = a @ b".
- a = np.eye(3)
- b = np.eye(3)
- assert_raises(TypeError, a.__imatmul__, b)
- import operator
- assert_raises(TypeError, operator.imatmul, a, b)
- # we avoid writing the token `exec` so as not to crash python 2's
- # parser
- exec_ = getattr(builtins, "exec")
- assert_raises(TypeError, exec_, "a @= b", globals(), locals())
-
- def test_matmul_axes():
- a = np.arange(3*4*5).reshape(3, 4, 5)
- c = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (1, 2)])
- assert c.shape == (3, 4, 4)
- d = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (0, 1)])
- assert d.shape == (4, 4, 3)
- e = np.swapaxes(d, 0, 2)
- assert_array_equal(e, c)
- f = np.matmul(a, np.arange(3), axes=[(1, 0), (0), (0)])
- assert f.shape == (4, 5)
-
-
-class TestInner(object):
+def test_matmul_inplace():
+ # It would be nice to support in-place matmul eventually, but for now
+ # we don't have a working implementation, so better just to error out
+ # and nudge people to writing "a = a @ b".
+ a = np.eye(3)
+ b = np.eye(3)
+ assert_raises(TypeError, a.__imatmul__, b)
+ import operator
+ assert_raises(TypeError, operator.imatmul, a, b)
+ assert_raises(TypeError, exec, "a @= b", globals(), locals())
+
+def test_matmul_axes():
+ a = np.arange(3*4*5).reshape(3, 4, 5)
+ c = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (1, 2)])
+ assert c.shape == (3, 4, 4)
+ d = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (0, 1)])
+ assert d.shape == (4, 4, 3)
+ e = np.swapaxes(d, 0, 2)
+ assert_array_equal(e, c)
+ f = np.matmul(a, np.arange(3), axes=[(1, 0), (0), (0)])
+ assert f.shape == (4, 5)
+
+
+class TestInner:
def test_inner_type_mismatch(self):
c = 1.
@@ -6445,7 +6405,7 @@ class TestInner(object):
assert_equal(np.inner(b, a).transpose(2,3,0,1), desired)
-class TestAlen(object):
+class TestAlen:
def test_basic(self):
with pytest.warns(DeprecationWarning):
m = np.array([1, 2, 3])
@@ -6465,7 +6425,7 @@ class TestAlen(object):
assert_equal(np.alen(5), 1)
-class TestChoose(object):
+class TestChoose:
def setup(self):
self.x = 2*np.ones((3,), dtype=int)
self.y = 3*np.ones((3,), dtype=int)
@@ -6485,8 +6445,17 @@ class TestChoose(object):
A = np.choose(self.ind, (self.x, self.y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
+ @pytest.mark.parametrize("ops",
+ [(1000, np.array([1], dtype=np.uint8)),
+ (-1, np.array([1], dtype=np.uint8)),
+ (1., np.float32(3)),
+ (1., np.array([3], dtype=np.float32))],)
+ def test_output_dtype(self, ops):
+ expected_dt = np.result_type(*ops)
+ assert(np.choose([0], ops).dtype == expected_dt)
-class TestRepeat(object):
+
+class TestRepeat:
def setup(self):
self.m = np.array([1, 2, 3, 4, 5, 6])
self.m_rect = self.m.reshape((2, 3))
@@ -6528,7 +6497,7 @@ NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4}
@pytest.mark.parametrize('dt', [float, Decimal], ids=['float', 'object'])
-class TestNeighborhoodIter(object):
+class TestNeighborhoodIter:
# Simple, 2d tests
def test_simple2d(self, dt):
# Test zero and one padding for simple data type
@@ -6607,7 +6576,7 @@ class TestNeighborhoodIter(object):
# Test stacking neighborhood iterators
-class TestStackedNeighborhoodIter(object):
+class TestStackedNeighborhoodIter:
# Simple, 1d test: stacking 2 constant-padded neigh iterators
def test_simple_const(self):
dt = np.float64
@@ -6757,7 +6726,7 @@ class TestStackedNeighborhoodIter(object):
x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
-class TestWarnings(object):
+class TestWarnings:
def test_complex_warning(self):
x = np.array([1, 2])
@@ -6769,7 +6738,7 @@ class TestWarnings(object):
assert_equal(x, [1, 2])
-class TestMinScalarType(object):
+class TestMinScalarType:
def test_usigned_shortshort(self):
dt = np.min_scalar_type(2**8-1)
@@ -6800,7 +6769,7 @@ class TestMinScalarType(object):
from numpy.core._internal import _dtype_from_pep3118
-class TestPEP3118Dtype(object):
+class TestPEP3118Dtype:
def _check(self, spec, wanted):
dt = np.dtype(wanted)
actual = _dtype_from_pep3118(spec)
@@ -6907,7 +6876,7 @@ class TestPEP3118Dtype(object):
self._check('i:f0:', [('f0', 'i')])
-class TestNewBufferProtocol(object):
+class TestNewBufferProtocol:
""" Test PEP3118 buffers """
def _check_roundtrip(self, obj):
@@ -7057,7 +7026,7 @@ class TestNewBufferProtocol(object):
assert_equal(y.shape, (5,))
assert_equal(y.ndim, 1)
assert_equal(y.strides, (4,))
- assert_equal(y.suboffsets, EMPTY)
+ assert_equal(y.suboffsets, ())
assert_equal(y.itemsize, 4)
def test_export_simple_nd(self):
@@ -7067,7 +7036,7 @@ class TestNewBufferProtocol(object):
assert_equal(y.shape, (2, 2))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (16, 8))
- assert_equal(y.suboffsets, EMPTY)
+ assert_equal(y.suboffsets, ())
assert_equal(y.itemsize, 8)
def test_export_discontiguous(self):
@@ -7077,7 +7046,7 @@ class TestNewBufferProtocol(object):
assert_equal(y.shape, (3, 3))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (36, 4))
- assert_equal(y.suboffsets, EMPTY)
+ assert_equal(y.suboffsets, ())
assert_equal(y.itemsize, 4)
def test_export_record(self):
@@ -7110,7 +7079,7 @@ class TestNewBufferProtocol(object):
y = memoryview(x)
assert_equal(y.shape, (1,))
assert_equal(y.ndim, 1)
- assert_equal(y.suboffsets, EMPTY)
+ assert_equal(y.suboffsets, ())
sz = sum([np.dtype(b).itemsize for a, b in dt])
if np.dtype('l').itemsize == 4:
@@ -7126,10 +7095,10 @@ class TestNewBufferProtocol(object):
x = np.array(([[1, 2], [3, 4]],), dtype=[('a', ('i', (2, 2)))])
y = memoryview(x)
assert_equal(y.format, 'T{(2,2)i:a:}')
- assert_equal(y.shape, EMPTY)
+ assert_equal(y.shape, ())
assert_equal(y.ndim, 0)
- assert_equal(y.strides, EMPTY)
- assert_equal(y.suboffsets, EMPTY)
+ assert_equal(y.strides, ())
+ assert_equal(y.suboffsets, ())
assert_equal(y.itemsize, 16)
def test_export_endian(self):
@@ -7232,7 +7201,7 @@ class TestNewBufferProtocol(object):
a = np.empty((1,) * 32)
self._check_roundtrip(a)
- @pytest.mark.skipif(sys.version_info < (2, 7, 7), reason="See gh-11115")
+ @pytest.mark.slow
def test_error_too_many_dims(self):
def make_ctype(shape, scalar_type):
t = scalar_type
@@ -7273,12 +7242,11 @@ class TestNewBufferProtocol(object):
np.array(t())
exc = cm.exception
- if sys.version_info.major > 2:
- with assert_raises_regex(
- NotImplementedError,
- r"Unrepresentable .* 'u' \(UCS-2 strings\)"
- ):
- raise exc.__cause__
+ with assert_raises_regex(
+ NotImplementedError,
+ r"Unrepresentable .* 'u' \(UCS-2 strings\)"
+ ):
+ raise exc.__cause__
def test_ctypes_integer_via_memoryview(self):
# gh-11150, due to bpo-10746
@@ -7304,7 +7272,7 @@ class TestNewBufferProtocol(object):
assert_equal(arr['a'], 3)
-class TestArrayAttributeDeletion(object):
+class TestArrayAttributeDeletion:
def test_multiarray_writable_attributes_deletion(self):
# ticket #2046, should not seqfault, raise AttributeError
@@ -7339,7 +7307,7 @@ class TestArrayAttributeDeletion(object):
class TestArrayInterface():
- class Foo(object):
+ class Foo:
def __init__(self, value):
self.value = value
self.iface = {'typestr': 'f8'}
@@ -7384,7 +7352,7 @@ class TestArrayInterface():
assert_equal(pre_cnt, post_cnt)
def test_interface_no_shape():
- class ArrayLike(object):
+ class ArrayLike:
array = np.array(1)
__array_interface__ = array.__array_interface__
assert_equal(np.array(ArrayLike()), 1)
@@ -7406,7 +7374,7 @@ def test_array_interface_empty_shape():
interface1 = dict(arr.__array_interface__)
interface1['shape'] = ()
- class DummyArray1(object):
+ class DummyArray1:
__array_interface__ = interface1
# NOTE: Because Py2 str/Py3 bytes supports the buffer interface, setting
@@ -7416,7 +7384,7 @@ def test_array_interface_empty_shape():
interface2 = dict(interface1)
interface2['data'] = arr[0].tobytes()
- class DummyArray2(object):
+ class DummyArray2:
__array_interface__ = interface2
arr1 = np.asarray(DummyArray1())
@@ -7433,7 +7401,7 @@ def test_array_interface_offset():
interface['offset'] = 4
- class DummyArray(object):
+ class DummyArray:
__array_interface__ = interface
arr1 = np.asarray(DummyArray())
@@ -7455,7 +7423,7 @@ def test_scalar_element_deletion():
assert_raises(ValueError, a[0].__delitem__, 'x')
-class TestMemEventHook(object):
+class TestMemEventHook:
def test_mem_seteventhook(self):
# The actual tests are within the C code in
# multiarray/_multiarray_tests.c.src
@@ -7467,7 +7435,7 @@ class TestMemEventHook(object):
break_cycles()
_multiarray_tests.test_pydatamem_seteventhook_end()
-class TestMapIter(object):
+class TestMapIter:
def test_mapiter(self):
# The actual tests are within the C code in
# multiarray/_multiarray_tests.c.src
@@ -7489,7 +7457,7 @@ class TestMapIter(object):
assert_equal(b, [100.1, 51., 6., 3., 4., 5.])
-class TestAsCArray(object):
+class TestAsCArray:
def test_1darray(self):
array = np.arange(24, dtype=np.double)
from_c = _multiarray_tests.test_as_c_array(array, 3)
@@ -7506,7 +7474,7 @@ class TestAsCArray(object):
assert_equal(array[1, 2, 3], from_c)
-class TestConversion(object):
+class TestConversion:
def test_array_scalar_relational_operation(self):
# All integer
for dt1 in np.typecodes['AllInteger']:
@@ -7554,10 +7522,9 @@ class TestConversion(object):
assert_equal(bool(np.array([[42]])), True)
assert_raises(ValueError, bool, np.array([1, 2]))
- class NotConvertible(object):
+ class NotConvertible:
def __bool__(self):
raise NotImplementedError
- __nonzero__ = __bool__ # python 2
assert_raises(NotImplementedError, bool, np.array(NotConvertible()))
assert_raises(NotImplementedError, bool, np.array([NotConvertible()]))
@@ -7575,8 +7542,8 @@ class TestConversion(object):
# gh-9972 means that these aren't always the same
int_funcs = (int, lambda x: x.__int__())
for int_func in int_funcs:
+ assert_equal(int_func(np.array(0)), 0)
assert_equal(int_func(np.array([1])), 1)
- assert_equal(int_func(np.array([0])), 0)
assert_equal(int_func(np.array([[42]])), 42)
assert_raises(TypeError, int_func, np.array([1, 2]))
@@ -7591,7 +7558,7 @@ class TestConversion(object):
assert_equal(3, int_func(np.array(HasTrunc())))
assert_equal(3, int_func(np.array([HasTrunc()])))
- class NotConvertible(object):
+ class NotConvertible:
def __int__(self):
raise NotImplementedError
assert_raises(NotImplementedError,
@@ -7600,7 +7567,7 @@ class TestConversion(object):
int_func, np.array([NotConvertible()]))
-class TestWhere(object):
+class TestWhere:
def test_basic(self):
dts = [bool, np.int16, np.int32, np.int64, np.double, np.complex128,
np.longdouble, np.clongdouble]
@@ -7767,7 +7734,7 @@ class TestWhere(object):
if not IS_PYPY:
# sys.getsizeof() is not valid on PyPy
- class TestSizeOf(object):
+ class TestSizeOf:
def test_empty_array(self):
x = np.array([])
@@ -7800,6 +7767,7 @@ if not IS_PYPY:
d = np.ones(100)
assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy()))
+ @_no_tracing
def test_resize(self):
d = np.ones(100)
old = sys.getsizeof(d)
@@ -7813,7 +7781,7 @@ if not IS_PYPY:
assert_raises(TypeError, d.__sizeof__, "a")
-class TestHashing(object):
+class TestHashing:
def test_arrays_not_hashable(self):
x = np.ones(3)
@@ -7821,10 +7789,10 @@ class TestHashing(object):
def test_collections_hashable(self):
x = np.array([])
- assert_(not isinstance(x, collections_abc.Hashable))
+ assert_(not isinstance(x, collections.abc.Hashable))
-class TestArrayPriority(object):
+class TestArrayPriority:
# This will go away when __array_priority__ is settled, meanwhile
# it serves to check unintended changes.
op = operator
@@ -7834,11 +7802,6 @@ class TestArrayPriority(object):
op.ge, op.lt, op.le, op.ne, op.eq
]
- # See #7949. Don't use "/" operator With -3 switch, since python reports it
- # as a DeprecationWarning
- if sys.version_info[0] < 3 and not sys.py3kwarning:
- binary_ops.append(op.div)
-
class Foo(np.ndarray):
__array_priority__ = 100.
@@ -7851,7 +7814,7 @@ class TestArrayPriority(object):
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
- class Other(object):
+ class Other:
__array_priority__ = 1000.
def _all(self, other):
@@ -7910,7 +7873,7 @@ class TestArrayPriority(object):
assert_(isinstance(f(b, a), self.Other), msg)
-class TestBytestringArrayNonzero(object):
+class TestBytestringArrayNonzero:
def test_empty_bstring_array_is_falsey(self):
assert_(not np.array([''], dtype=str))
@@ -7931,28 +7894,56 @@ class TestBytestringArrayNonzero(object):
assert_(a)
-class TestUnicodeArrayNonzero(object):
+class TestUnicodeEncoding:
+ """
+ Tests for encoding related bugs, such as UCS2 vs UCS4, round-tripping
+ issues, etc
+ """
+ def test_round_trip(self):
+ """ Tests that GETITEM, SETITEM, and PyArray_Scalar roundtrip """
+ # gh-15363
+ arr = np.zeros(shape=(), dtype="U1")
+ for i in range(1, sys.maxunicode + 1):
+ expected = chr(i)
+ arr[()] = expected
+ assert arr[()] == expected
+ assert arr.item() == expected
+
+ def test_assign_scalar(self):
+ # gh-3258
+ l = np.array(['aa', 'bb'])
+ l[:] = np.unicode_('cc')
+ assert_equal(l, ['cc', 'cc'])
+
+ def test_fill_scalar(self):
+ # gh-7227
+ l = np.array(['aa', 'bb'])
+ l.fill(np.unicode_('cc'))
+ assert_equal(l, ['cc', 'cc'])
+
+
+class TestUnicodeArrayNonzero:
def test_empty_ustring_array_is_falsey(self):
- assert_(not np.array([''], dtype=np.unicode))
+ assert_(not np.array([''], dtype=np.unicode_))
def test_whitespace_ustring_array_is_falsey(self):
- a = np.array(['eggs'], dtype=np.unicode)
+ a = np.array(['eggs'], dtype=np.unicode_)
a[0] = ' \0\0'
assert_(not a)
def test_all_null_ustring_array_is_falsey(self):
- a = np.array(['eggs'], dtype=np.unicode)
+ a = np.array(['eggs'], dtype=np.unicode_)
a[0] = '\0\0\0\0'
assert_(not a)
def test_null_inside_ustring_array_is_truthy(self):
- a = np.array(['eggs'], dtype=np.unicode)
+ a = np.array(['eggs'], dtype=np.unicode_)
a[0] = ' \0 \0'
assert_(a)
-class TestFormat(object):
+class TestFormat:
def test_0d(self):
a = np.array(np.pi)
@@ -7966,16 +7957,11 @@ class TestFormat(object):
def test_1d_format(self):
# until gh-5543, ensure that the behaviour matches what it used to be
a = np.array([np.pi])
- if sys.version_info[:2] >= (3, 4):
- assert_raises(TypeError, '{:30}'.format, a)
- else:
- with suppress_warnings() as sup:
- sup.filter(PendingDeprecationWarning)
- res = '{:30}'.format(a)
- dst = object.__format__(a, '30')
- assert_equal(res, dst)
+ assert_raises(TypeError, '{:30}'.format, a)
+
+from numpy.testing import IS_PYPY
-class TestCTypes(object):
+class TestCTypes:
def test_ctypes_is_available(self):
test_arr = np.array([[1, 2, 3], [4, 5, 6]])
@@ -8041,11 +8027,33 @@ class TestCTypes(object):
# but when the `ctypes_ptr` object dies, so should `arr`
del ctypes_ptr
+ if IS_PYPY:
+ # Pypy does not recycle arr objects immediately. Trigger gc to
+ # release arr. Cpython uses refcounts. An explicit call to gc
+ # should not be needed here.
+ break_cycles()
+ assert_(arr_ref() is None, "unknowable whether ctypes pointer holds a reference")
+
+ def test_ctypes_as_parameter_holds_reference(self):
+ arr = np.array([None]).copy()
+
+ arr_ref = weakref.ref(arr)
+
+ ctypes_ptr = arr.ctypes._as_parameter_
+
+ # `ctypes_ptr` should hold onto `arr`
+ del arr
break_cycles()
+ assert_(arr_ref() is not None, "ctypes pointer did not hold onto a reference")
+
+ # but when the `ctypes_ptr` object dies, so should `arr`
+ del ctypes_ptr
+ if IS_PYPY:
+ break_cycles()
assert_(arr_ref() is None, "unknowable whether ctypes pointer holds a reference")
-class TestWritebackIfCopy(object):
+class TestWritebackIfCopy:
# all these tests use the WRITEBACKIFCOPY mechanism
def test_argmax_with_out(self):
mat = np.eye(5)
@@ -8158,7 +8166,7 @@ class TestWritebackIfCopy(object):
assert_equal(arr, orig)
-class TestArange(object):
+class TestArange:
def test_infinite(self):
assert_raises_regex(
ValueError, "size exceeded",
@@ -8180,7 +8188,7 @@ class TestArange(object):
assert_raises(ZeroDivisionError, np.arange, 0.0, 0.0, 0.0)
-class TestArrayFinalize(object):
+class TestArrayFinalize:
""" Tests __array_finalize__ """
def test_receives_base(self):
@@ -8200,7 +8208,7 @@ class TestArrayFinalize(object):
raise Exception(self)
# a plain object can't be weakref'd
- class Dummy(object): pass
+ class Dummy: pass
# get a weak reference to an object within an array
obj_arr = np.array(Dummy())
@@ -8209,9 +8217,6 @@ class TestArrayFinalize(object):
# get an array that crashed in __array_finalize__
with assert_raises(Exception) as e:
obj_arr.view(RaisesInFinalize)
- if sys.version_info.major == 2:
- # prevent an extra reference being kept
- sys.exc_clear()
obj_subarray = e.exception.args[0]
del e
@@ -8240,7 +8245,7 @@ def test_equal_override():
# gh-9153: ndarray.__eq__ uses special logic for structured arrays, which
# did not respect overrides with __array_priority__ or __array_ufunc__.
# The PR fixed this for __array_priority__ and __array_ufunc__ = None.
- class MyAlwaysEqual(object):
+ class MyAlwaysEqual:
def __eq__(self, other):
return "eq"
@@ -8303,7 +8308,7 @@ def test_npymath_real():
assert_allclose(got, expected)
def test_uintalignment_and_alignment():
- # alignment code needs to satisfy these requrements:
+ # alignment code needs to satisfy these requirements:
# 1. numpy structs match C struct layout
# 2. ufuncs/casting is safe wrt to aligned access
# 3. copy code is safe wrt to "uint alidned" access
@@ -8338,7 +8343,7 @@ def test_uintalignment_and_alignment():
dst = np.zeros((2,2), dtype='c8')
dst[:,1] = src[:,1] # assert in lowlevel_strided_loops fails?
-class TestAlignment(object):
+class TestAlignment:
# adapted from scipy._lib.tests.test__util.test__aligned_zeros
# Checks that unusual memory alignments don't trip up numpy.
# In particular, check RELAXED_STRIDES don't trip alignment assertions in
diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py
index cf66751f8..c106c528d 100644
--- a/numpy/core/tests/test_nditer.py
+++ b/numpy/core/tests/test_nditer.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import sys
import pytest
@@ -2104,7 +2102,7 @@ def test_iter_buffering_string():
assert_equal(i[0], b'abc')
assert_equal(i[0].dtype, np.dtype('S6'))
- a = np.array(['abc', 'a', 'abcd'], dtype=np.unicode)
+ a = np.array(['abc', 'a', 'abcd'], dtype=np.unicode_)
assert_equal(a.dtype, np.dtype('U4'))
assert_raises(TypeError, nditer, a, ['buffered'], ['readonly'],
op_dtypes='U2')
@@ -2188,7 +2186,7 @@ def test_iter_no_broadcast():
[['readonly'], ['readonly'], ['readonly', 'no_broadcast']])
-class TestIterNested(object):
+class TestIterNested:
def test_basic(self):
# Test nested iteration basic usage
@@ -2690,7 +2688,15 @@ def test_0d_iter():
i = nditer(np.arange(5), ['multi_index'], [['readonly']], op_axes=[()])
assert_equal(i.ndim, 0)
assert_equal(len(i), 1)
- # note that itershape=(), still behaves like None due to the conversions
+
+ i = nditer(np.arange(5), ['multi_index'], [['readonly']],
+ op_axes=[()], itershape=())
+ assert_equal(i.ndim, 0)
+ assert_equal(len(i), 1)
+
+ # passing an itershape alone is not enough, the op_axes are also needed
+ with assert_raises(ValueError):
+ nditer(np.arange(5), ['multi_index'], [['readonly']], itershape=())
# Test a more complex buffered casting case (same as another test above)
sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')]
diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py
index 1358b45e9..bcc6a0c4e 100644
--- a/numpy/core/tests/test_numeric.py
+++ b/numpy/core/tests/test_numeric.py
@@ -1,10 +1,9 @@
-from __future__ import division, absolute_import, print_function
-
import sys
import warnings
import itertools
import platform
import pytest
+import math
from decimal import Decimal
import numpy as np
@@ -13,11 +12,14 @@ from numpy.random import rand, randint, randn
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_raises_regex,
assert_array_equal, assert_almost_equal, assert_array_almost_equal,
- assert_warns, HAS_REFCOUNT
+ assert_warns, assert_array_max_ulp, HAS_REFCOUNT
)
+from hypothesis import assume, given, strategies as st
+from hypothesis.extra import numpy as hynp
+
-class TestResize(object):
+class TestResize:
def test_copies(self):
A = np.array([[1, 2], [3, 4]])
Ar1 = np.array([[1, 2, 3, 4], [1, 2, 3, 4]])
@@ -49,7 +51,7 @@ class TestResize(object):
assert_equal(A.dtype, Ar.dtype)
-class TestNonarrayArgs(object):
+class TestNonarrayArgs:
# check that non-array arguments to functions wrap them in arrays
def test_choose(self):
choices = [[0, 1, 2],
@@ -138,6 +140,51 @@ class TestNonarrayArgs(object):
arr = [1.56, 72.54, 6.35, 3.25]
tgt = [1.6, 72.5, 6.4, 3.2]
assert_equal(np.around(arr, decimals=1), tgt)
+ s = np.float64(1.)
+ assert_(isinstance(s.round(), np.float64))
+ assert_equal(s.round(), 1.)
+
+ @pytest.mark.parametrize('dtype', [
+ np.int8, np.int16, np.int32, np.int64,
+ np.uint8, np.uint16, np.uint32, np.uint64,
+ np.float16, np.float32, np.float64,
+ ])
+ def test_dunder_round(self, dtype):
+ s = dtype(1)
+ assert_(isinstance(round(s), int))
+ assert_(isinstance(round(s, None), int))
+ assert_(isinstance(round(s, ndigits=None), int))
+ assert_equal(round(s), 1)
+ assert_equal(round(s, None), 1)
+ assert_equal(round(s, ndigits=None), 1)
+
+ @pytest.mark.parametrize('val, ndigits', [
+ pytest.param(2**31 - 1, -1,
+ marks=pytest.mark.xfail(reason="Out of range of int32")
+ ),
+ (2**31 - 1, 1-math.ceil(math.log10(2**31 - 1))),
+ (2**31 - 1, -math.ceil(math.log10(2**31 - 1)))
+ ])
+ def test_dunder_round_edgecases(self, val, ndigits):
+ assert_equal(round(val, ndigits), round(np.int32(val), ndigits))
+
+ def test_dunder_round_accuracy(self):
+ f = np.float64(5.1 * 10**73)
+ assert_(isinstance(round(f, -73), np.float64))
+ assert_array_max_ulp(round(f, -73), 5.0 * 10**73)
+ assert_(isinstance(round(f, ndigits=-73), np.float64))
+ assert_array_max_ulp(round(f, ndigits=-73), 5.0 * 10**73)
+
+ i = np.int64(501)
+ assert_(isinstance(round(i, -2), np.int64))
+ assert_array_max_ulp(round(i, -2), 500)
+ assert_(isinstance(round(i, ndigits=-2), np.int64))
+ assert_array_max_ulp(round(i, ndigits=-2), 500)
+
+ @pytest.mark.xfail(raises=AssertionError, reason="gh-15896")
+ def test_round_py_consistency(self):
+ f = 5.1 * 10**73
+ assert_equal(round(np.float64(f), -73), round(f, -73))
def test_searchsorted(self):
arr = [-8, -5, -1, 3, 6, 10]
@@ -220,7 +267,7 @@ class TestNonarrayArgs(object):
B[0] = 1j
assert_almost_equal(np.var(B), 0.25)
-class TestIsscalar(object):
+class TestIsscalar:
def test_isscalar(self):
assert_(np.isscalar(3.1))
assert_(np.isscalar(np.int16(12345)))
@@ -236,7 +283,7 @@ class TestIsscalar(object):
assert_(np.isscalar(Number()))
-class TestBoolScalar(object):
+class TestBoolScalar:
def test_logical(self):
f = np.False_
t = np.True_
@@ -269,7 +316,7 @@ class TestBoolScalar(object):
assert_((f ^ f) is f)
-class TestBoolArray(object):
+class TestBoolArray:
def setup(self):
# offset for simd tests
self.t = np.array([True] * 41, dtype=bool)[1::]
@@ -356,7 +403,7 @@ class TestBoolArray(object):
assert_array_equal(self.im ^ False, self.im)
-class TestBoolCmp(object):
+class TestBoolCmp:
def setup(self):
self.f = np.ones(256, dtype=np.float32)
self.ef = np.ones(self.f.size, dtype=bool)
@@ -456,7 +503,7 @@ class TestBoolCmp(object):
assert_array_equal(np.signbit(self.signd[i:]), self.ed[i:])
-class TestSeterr(object):
+class TestSeterr:
def test_default(self):
err = np.geterr()
assert_equal(err,
@@ -537,7 +584,7 @@ class TestSeterr(object):
np.seterrobj(olderrobj)
-class TestFloatExceptions(object):
+class TestFloatExceptions:
def assert_raises_fpe(self, fpeerr, flop, x, y):
ftype = type(x)
try:
@@ -633,7 +680,7 @@ class TestFloatExceptions(object):
assert_("underflow" in str(w[-1].message))
-class TestTypes(object):
+class TestTypes:
def check_promotion_cases(self, promote_func):
# tests that the scalars get coerced correctly.
b = np.bool_(0)
@@ -954,10 +1001,9 @@ class NIterError(Exception):
pass
-class TestFromiter(object):
+class TestFromiter:
def makegen(self):
- for x in range(24):
- yield x**2
+ return (x**2 for x in range(24))
def test_types(self):
ai32 = np.fromiter(self.makegen(), np.int32)
@@ -1005,7 +1051,7 @@ class TestFromiter(object):
self.load_data(count, eindex), dtype=int, count=count)
-class TestNonzero(object):
+class TestNonzero:
def test_nonzero_trivial(self):
assert_equal(np.count_nonzero(np.array([])), 0)
assert_equal(np.count_nonzero(np.array([], dtype='?')), 0)
@@ -1201,6 +1247,17 @@ class TestNonzero(object):
a = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(np.count_nonzero(a, axis=()), a.astype(bool))
+ def test_countnonzero_keepdims(self):
+ a = np.array([[0, 0, 1, 0],
+ [0, 3, 5, 0],
+ [7, 9, 2, 0]])
+ assert_equal(np.count_nonzero(a, axis=0, keepdims=True),
+ [[1, 2, 3, 0]])
+ assert_equal(np.count_nonzero(a, axis=1, keepdims=True),
+ [[1], [2], [3]])
+ assert_equal(np.count_nonzero(a, keepdims=True),
+ [[6]])
+
def test_array_method(self):
# Tests that the array method
# call to nonzero works
@@ -1211,14 +1268,12 @@ class TestNonzero(object):
def test_nonzero_invalid_object(self):
# gh-9295
- a = np.array([np.array([1, 2]), 3])
+ a = np.array([np.array([1, 2]), 3], dtype=object)
assert_raises(ValueError, np.nonzero, a)
class BoolErrors:
def __bool__(self):
raise ValueError("Not allowed")
- def __nonzero__(self):
- raise ValueError("Not allowed")
assert_raises(ValueError, np.nonzero, np.array([BoolErrors()]))
@@ -1288,7 +1343,7 @@ class TestNonzero(object):
assert_raises(ValueError, np.nonzero, a)
-class TestIndex(object):
+class TestIndex:
def test_boolean(self):
a = rand(3, 5, 8)
V = rand(5, 8)
@@ -1305,7 +1360,7 @@ class TestIndex(object):
assert_equal(c.dtype, np.dtype('int32'))
-class TestBinaryRepr(object):
+class TestBinaryRepr:
def test_zero(self):
assert_equal(np.binary_repr(0), '0')
@@ -1347,7 +1402,7 @@ class TestBinaryRepr(object):
'11' + '0'*62)
-class TestBaseRepr(object):
+class TestBaseRepr:
def test_base3(self):
assert_equal(np.base_repr(3**5, 3), '100000')
@@ -1369,7 +1424,7 @@ class TestBaseRepr(object):
np.base_repr(1, 37)
-class TestArrayComparisons(object):
+class TestArrayComparisons:
def test_array_equal(self):
res = np.array_equal(np.array([1, 2]), np.array([1, 2]))
assert_(res)
@@ -1448,7 +1503,7 @@ def assert_array_strict_equal(x, y):
assert_(x.dtype.isnative == y.dtype.isnative)
-class TestClip(object):
+class TestClip:
def setup(self):
self.nr = 5
self.nc = 3
@@ -1884,7 +1939,7 @@ class TestClip(object):
assert_array_strict_equal(ac, act)
def test_clip_with_out_transposed(self):
- # Test that the out argument works when tranposed
+ # Test that the out argument works when transposed
a = np.arange(16).reshape(4, 4)
out = np.empty_like(a).T
a.clip(4, 10, out=out)
@@ -1993,20 +2048,22 @@ class TestClip(object):
actual = np.clip(arr, amin, amax)
assert_equal(actual, exp)
- @pytest.mark.xfail(reason="no scalar nan propagation yet")
+ @pytest.mark.xfail(reason="no scalar nan propagation yet",
+ raises=AssertionError,
+ strict=True)
@pytest.mark.parametrize("arr, amin, amax", [
# problematic scalar nan case from hypothesis
(np.zeros(10, dtype=np.int64),
np.array(np.nan),
np.zeros(10, dtype=np.int32)),
])
+ @pytest.mark.filterwarnings("ignore::DeprecationWarning")
def test_clip_scalar_nan_propagation(self, arr, amin, amax):
# enforcement of scalar nan propagation for comparisons
# called through clip()
- expected = np.minimum(np.maximum(a, amin), amax)
- with assert_warns(DeprecationWarning):
- actual = np.clip(arr, amin, amax)
- assert_equal(actual, expected)
+ expected = np.minimum(np.maximum(arr, amin), amax)
+ actual = np.clip(arr, amin, amax)
+ assert_equal(actual, expected)
@pytest.mark.xfail(reason="propagation doesn't match spec")
@pytest.mark.parametrize("arr, amin, amax", [
@@ -2014,15 +2071,78 @@ class TestClip(object):
np.timedelta64('NaT'),
np.zeros(10, dtype=np.int32)),
])
+ @pytest.mark.filterwarnings("ignore::DeprecationWarning")
def test_NaT_propagation(self, arr, amin, amax):
# NOTE: the expected function spec doesn't
# propagate NaT, but clip() now does
- expected = np.minimum(np.maximum(a, amin), amax)
+ expected = np.minimum(np.maximum(arr, amin), amax)
actual = np.clip(arr, amin, amax)
assert_equal(actual, expected)
+ @given(data=st.data(), shape=hynp.array_shapes())
+ def test_clip_property(self, data, shape):
+ """A property-based test using Hypothesis.
-class TestAllclose(object):
+ This aims for maximum generality: it could in principle generate *any*
+ valid inputs to np.clip, and in practice generates much more varied
+ inputs than human testers come up with.
+
+ Because many of the inputs have tricky dependencies - compatible dtypes
+    and mutually-broadcastable shapes - we use the `st.data()` strategy to draw
+ values *inside* the test function, from strategies we construct based
+ on previous values. An alternative would be to define a custom strategy
+    with `@st.composite`, but until we need to reuse it, inline code is fine.
+
+ That accounts for most of the function; the actual test is just three
+ lines to calculate and compare actual vs expected results!
+ """
+ # Our base array and bounds should not need to be of the same type as
+ # long as they are all compatible - so we allow any int or float type.
+ dtype_strategy = hynp.integer_dtypes() | hynp.floating_dtypes()
+
+ # The following line is a total hack to disable the varied-dtypes
+ # component of this test, because result != expected if dtypes can vary.
+ dtype_strategy = st.just(data.draw(dtype_strategy))
+
+ # Generate an arbitrary array of the chosen shape and dtype
+ # This is the value that we clip.
+ arr = data.draw(hynp.arrays(dtype=dtype_strategy, shape=shape))
+
+ # Generate shapes for the bounds which can be broadcast with each other
+ # and with the base shape. Below, we might decide to use scalar bounds,
+ # but it's clearer to generate these shapes unconditionally in advance.
+ in_shapes, result_shape = data.draw(
+ hynp.mutually_broadcastable_shapes(
+ num_shapes=2,
+ base_shape=shape,
+ # Commenting out the min_dims line allows zero-dimensional arrays,
+ # and zero-dimensional arrays containing NaN make the test fail.
+ min_dims=1
+
+ )
+ )
+ amin = data.draw(
+ dtype_strategy.flatmap(hynp.from_dtype)
+ | hynp.arrays(dtype=dtype_strategy, shape=in_shapes[0])
+ )
+ amax = data.draw(
+ dtype_strategy.flatmap(hynp.from_dtype)
+ | hynp.arrays(dtype=dtype_strategy, shape=in_shapes[1])
+ )
+ # If we allow either bound to be a scalar `nan`, the test will fail -
+ # so we just "assume" that away (if it is, this raises a special
+ # exception and Hypothesis will try again with different inputs)
+ assume(not np.isscalar(amin) or not np.isnan(amin))
+ assume(not np.isscalar(amax) or not np.isnan(amax))
+
+ # Then calculate our result and expected result and check that they're
+ # equal! See gh-12519 for discussion deciding on this property.
+ result = np.clip(arr, amin, amax)
+ expected = np.minimum(amax, np.maximum(arr, amin))
+ assert_array_equal(result, expected)
+
+
+class TestAllclose:
rtol = 1e-5
atol = 1e-8
@@ -2107,7 +2227,7 @@ class TestAllclose(object):
assert_(type(np.allclose(a, a)) is bool)
-class TestIsclose(object):
+class TestIsclose:
rtol = 1e-5
atol = 1e-8
@@ -2245,7 +2365,7 @@ class TestIsclose(object):
assert_(type(np.isclose(0, np.inf)) is np.bool_)
-class TestStdVar(object):
+class TestStdVar:
def setup(self):
self.A = np.array([1, -1, 1, -1])
self.real_var = 1
@@ -2284,7 +2404,7 @@ class TestStdVar(object):
assert_array_equal(r, out)
-class TestStdVarComplex(object):
+class TestStdVarComplex:
def test_basic(self):
A = np.array([1, 1.j, -1, -1.j])
real_var = 1
@@ -2296,7 +2416,7 @@ class TestStdVarComplex(object):
assert_equal(np.std(1j), 0)
-class TestCreationFuncs(object):
+class TestCreationFuncs:
# Test ones, zeros, empty and full.
def setup(self):
@@ -2367,7 +2487,7 @@ class TestCreationFuncs(object):
assert_(sys.getrefcount(dim) == beg)
-class TestLikeFuncs(object):
+class TestLikeFuncs:
'''Test ones_like, zeros_like, empty_like and full_like'''
def setup(self):
@@ -2394,7 +2514,7 @@ class TestLikeFuncs(object):
(np.arange(24).reshape(2, 3, 4).swapaxes(0, 1), None),
(np.arange(24).reshape(4, 3, 2).swapaxes(0, 1), '?'),
]
- self.shapes = [(5,), (5,6,), (5,6,7,)]
+ self.shapes = [(), (5,), (5,6,), (5,6,7,)]
def compare_array_value(self, dz, value, fill_value):
if value is not None:
@@ -2517,7 +2637,7 @@ class TestLikeFuncs(object):
self.check_like_function(np.full_like, np.inf, True)
-class TestCorrelate(object):
+class TestCorrelate:
def _setup(self, dt):
self.x = np.array([1, 2, 3, 4, 5], dtype=dt)
self.xs = np.arange(1, 20)[::3]
@@ -2567,8 +2687,13 @@ class TestCorrelate(object):
z = np.correlate(y, x, mode='full')
assert_array_almost_equal(z, r_z)
+ def test_zero_size(self):
+ with pytest.raises(ValueError):
+ np.correlate(np.array([]), np.ones(1000), mode='full')
+ with pytest.raises(ValueError):
+ np.correlate(np.ones(1000), np.array([]), mode='full')
-class TestConvolve(object):
+class TestConvolve:
def test_object(self):
d = [1.] * 100
k = [1.] * 3
@@ -2582,7 +2707,7 @@ class TestConvolve(object):
assert_array_equal(k, np.ones(3))
-class TestArgwhere(object):
+class TestArgwhere:
@pytest.mark.parametrize('nd', [0, 1, 2])
def test_nd(self, nd):
@@ -2619,7 +2744,7 @@ class TestArgwhere(object):
assert_equal(np.argwhere([4, 0, 2, 1, 3]), [[0], [2], [3], [4]])
-class TestStringFunction(object):
+class TestStringFunction:
def test_set_string_function(self):
a = np.array([1])
@@ -2634,7 +2759,7 @@ class TestStringFunction(object):
assert_equal(str(a), "[1]")
-class TestRoll(object):
+class TestRoll:
def test_roll1d(self):
x = np.arange(10)
xr = np.roll(x, 2)
@@ -2692,7 +2817,7 @@ class TestRoll(object):
assert_equal(np.roll(x, 1), np.array([]))
-class TestRollaxis(object):
+class TestRollaxis:
# expected shape indexed by (axis, start) for array of
# shape (1, 2, 3, 4)
@@ -2754,7 +2879,7 @@ class TestRollaxis(object):
assert_(not res.flags['OWNDATA'])
-class TestMoveaxis(object):
+class TestMoveaxis:
def test_move_to_end(self):
x = np.random.randn(5, 6, 7)
for source, expected in [(0, (6, 7, 5)),
@@ -2828,7 +2953,7 @@ class TestMoveaxis(object):
assert_(isinstance(result, np.ndarray))
-class TestCross(object):
+class TestCross:
def test_2x2(self):
u = [1, 2]
v = [3, 4]
@@ -2917,7 +3042,7 @@ def test_outer_out_param():
assert_equal(np.outer(arr2, arr3, out2), out2)
-class TestIndices(object):
+class TestIndices:
def test_simple(self):
[x, y] = np.indices((4, 3))
@@ -2948,7 +3073,7 @@ class TestIndices(object):
assert_array_equal(x, np.array([[0], [1], [2], [3]]))
assert_array_equal(y, np.array([[0, 1, 2]]))
- @pytest.mark.parametrize("dtype", [np.int, np.float32, np.float64])
+ @pytest.mark.parametrize("dtype", [np.int32, np.int64, np.float32, np.float64])
@pytest.mark.parametrize("dims", [(), (0,), (4, 3)])
def test_return_type(self, dtype, dims):
inds = np.indices(dims, dtype=dtype)
@@ -2958,7 +3083,7 @@ class TestIndices(object):
assert_(arr.dtype == dtype)
-class TestRequire(object):
+class TestRequire:
flag_names = ['C', 'C_CONTIGUOUS', 'CONTIGUOUS',
'F', 'F_CONTIGUOUS', 'FORTRAN',
'A', 'ALIGNED',
@@ -3032,7 +3157,7 @@ class TestRequire(object):
self.set_and_check_flag(flag, None, a)
-class TestBroadcast(object):
+class TestBroadcast:
def test_broadcast_in_args(self):
# gh-5881
arrs = [np.empty((6, 7)), np.empty((5, 6, 1)), np.empty((7,)),
@@ -3083,7 +3208,7 @@ class TestBroadcast(object):
assert_raises(ValueError, np.broadcast, 1, **{'x': 1})
-class TestKeepdims(object):
+class TestKeepdims:
class sub_array(np.ndarray):
def sum(self, axis=None, dtype=None, out=None):
@@ -3095,7 +3220,7 @@ class TestKeepdims(object):
assert_raises(TypeError, np.sum, x, keepdims=True)
-class TestTensordot(object):
+class TestTensordot:
def test_zero_dimension(self):
# Test resolution to issue #5663
diff --git a/numpy/core/tests/test_numerictypes.py b/numpy/core/tests/test_numerictypes.py
index 387740e35..9cb00342d 100644
--- a/numpy/core/tests/test_numerictypes.py
+++ b/numpy/core/tests/test_numerictypes.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import sys
import itertools
@@ -100,7 +98,7 @@ def normalize_descr(descr):
# Creation tests
############################################################
-class CreateZeros(object):
+class CreateZeros:
"""Check the creation of heterogeneous arrays zero-valued"""
def test_zeros0D(self):
@@ -143,7 +141,7 @@ class TestCreateZerosNested(CreateZeros):
_descr = Ndescr
-class CreateValues(object):
+class CreateValues:
"""Check the creation of heterogeneous arrays with values"""
def test_tuple(self):
@@ -203,7 +201,7 @@ class TestCreateValuesNestedMultiple(CreateValues):
# Reading tests
############################################################
-class ReadValuesPlain(object):
+class ReadValuesPlain:
"""Check the reading of values in heterogeneous arrays (plain)"""
def test_access_fields(self):
@@ -235,7 +233,7 @@ class TestReadValuesPlainMultiple(ReadValuesPlain):
multiple_rows = 1
_buffer = PbufferT
-class ReadValuesNested(object):
+class ReadValuesNested:
"""Check the reading of values in heterogeneous arrays (nested)"""
def test_access_top_fields(self):
@@ -308,10 +306,7 @@ class ReadValuesNested(object):
h = np.array(self._buffer, dtype=self._descr)
assert_(h.dtype['Info']['value'].name == 'complex128')
assert_(h.dtype['Info']['y2'].name == 'float64')
- if sys.version_info[0] >= 3:
- assert_(h.dtype['info']['Name'].name == 'str256')
- else:
- assert_(h.dtype['info']['Name'].name == 'unicode256')
+ assert_(h.dtype['info']['Name'].name == 'str256')
assert_(h.dtype['info']['Value'].name == 'complex128')
def test_nested2_descriptor(self):
@@ -333,14 +328,14 @@ class TestReadValuesNestedMultiple(ReadValuesNested):
multiple_rows = True
_buffer = NbufferT
-class TestEmptyField(object):
+class TestEmptyField:
def test_assign(self):
a = np.arange(10, dtype=np.float32)
a.dtype = [("int", "<0i4"), ("float", "<2f4")]
assert_(a['int'].shape == (5, 0))
assert_(a['float'].shape == (5, 2))
-class TestCommonType(object):
+class TestCommonType:
def test_scalar_loses1(self):
res = np.find_common_type(['f4', 'f4', 'i2'], ['f8'])
assert_(res == 'f4')
@@ -361,7 +356,7 @@ class TestCommonType(object):
res = np.find_common_type(['u8', 'i8', 'i8'], ['f8'])
assert_(res == 'f8')
-class TestMultipleFields(object):
+class TestMultipleFields:
def setup(self):
self.ary = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype='i4,f4,i2,c8')
@@ -376,7 +371,7 @@ class TestMultipleFields(object):
assert_(res == [(1, 3), (5, 7)])
-class TestIsSubDType(object):
+class TestIsSubDType:
# scalar types can be promoted into dtypes
wrappers = [np.dtype, lambda x: x]
@@ -406,19 +401,48 @@ class TestIsSubDType(object):
assert_(not np.issubdtype(w1(np.float32), w2(np.float64)))
assert_(not np.issubdtype(w1(np.float64), w2(np.float32)))
-
-class TestSctypeDict(object):
+ def test_nondtype_nonscalartype(self):
+ # See gh-14619 and gh-9505 which introduced the deprecation to fix
+ # this. These tests are directly taken from gh-9505
+ assert not np.issubdtype(np.float32, 'float64')
+ assert not np.issubdtype(np.float32, 'f8')
+ assert not np.issubdtype(np.int32, str)
+ assert not np.issubdtype(np.int32, 'int64')
+ assert not np.issubdtype(np.str_, 'void')
+ # for the following the correct spellings are
+ # np.integer, np.floating, or np.complexfloating respectively:
+ assert not np.issubdtype(np.int8, int) # np.int8 is never np.int_
+ assert not np.issubdtype(np.float32, float)
+ assert not np.issubdtype(np.complex64, complex)
+ assert not np.issubdtype(np.float32, "float")
+ assert not np.issubdtype(np.float64, "f")
+
+ # Test the same for the correct first datatype and abstract one
+ # in the case of int, float, complex:
+ assert np.issubdtype(np.float64, 'float64')
+ assert np.issubdtype(np.float64, 'f8')
+ assert np.issubdtype(np.str_, str)
+ assert np.issubdtype(np.int64, 'int64')
+ assert np.issubdtype(np.void, 'void')
+ assert np.issubdtype(np.int8, np.integer)
+ assert np.issubdtype(np.float32, np.floating)
+ assert np.issubdtype(np.complex64, np.complexfloating)
+ assert np.issubdtype(np.float64, "float")
+ assert np.issubdtype(np.float32, "f")
+
+
+class TestSctypeDict:
def test_longdouble(self):
assert_(np.sctypeDict['f8'] is not np.longdouble)
assert_(np.sctypeDict['c16'] is not np.clongdouble)
-class TestBitName(object):
+class TestBitName:
def test_abstract(self):
assert_raises(ValueError, np.core.numerictypes.bitname, np.floating)
-class TestMaximumSctype(object):
+class TestMaximumSctype:
# note that parametrizing with sctype['int'] and similar would skip types
# with the same size (gh-11923)
@@ -444,7 +468,7 @@ class TestMaximumSctype(object):
assert_equal(np.maximum_sctype(t), t)
-class Test_sctype2char(object):
+class Test_sctype2char:
# This function is old enough that we're really just documenting the quirks
# at this point.
@@ -491,8 +515,9 @@ def test_issctype(rep, expected):
@pytest.mark.skipif(sys.flags.optimize > 1,
reason="no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1")
-@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc")
-class TestDocStrings(object):
+@pytest.mark.xfail(IS_PYPY,
+ reason="PyPy cannot modify tp_doc after PyType_Ready")
+class TestDocStrings:
def test_platform_dependent_aliases(self):
if np.int64 is np.int_:
assert_('int64' in np.int_.__doc__)
diff --git a/numpy/core/tests/test_overrides.py b/numpy/core/tests/test_overrides.py
index 63b0e4539..7e73d8c03 100644
--- a/numpy/core/tests/test_overrides.py
+++ b/numpy/core/tests/test_overrides.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import inspect
import sys
from unittest import mock
@@ -36,7 +34,7 @@ def dispatched_two_arg(array1, array2):
return 'original'
-class TestGetImplementingArgs(object):
+class TestGetImplementingArgs:
def test_ndarray(self):
array = np.array(1)
@@ -77,7 +75,7 @@ class TestGetImplementingArgs(object):
def test_ndarray_and_duck_array(self):
- class Other(object):
+ class Other:
__array_function__ = _return_not_implemented
array = np.array(1)
@@ -94,7 +92,7 @@ class TestGetImplementingArgs(object):
class OverrideSub(np.ndarray):
__array_function__ = _return_not_implemented
- class Other(object):
+ class Other:
__array_function__ = _return_not_implemented
array = np.array(1)
@@ -108,7 +106,7 @@ class TestGetImplementingArgs(object):
def test_many_duck_arrays(self):
- class A(object):
+ class A:
__array_function__ = _return_not_implemented
class B(A):
@@ -117,7 +115,7 @@ class TestGetImplementingArgs(object):
class C(A):
__array_function__ = _return_not_implemented
- class D(object):
+ class D:
__array_function__ = _return_not_implemented
a = A()
@@ -147,12 +145,12 @@ class TestGetImplementingArgs(object):
_get_implementing_args(relevant_args)
-class TestNDArrayArrayFunction(object):
+class TestNDArrayArrayFunction:
@requires_array_function
def test_method(self):
- class Other(object):
+ class Other:
__array_function__ = _return_not_implemented
class NoOverrideSub(np.ndarray):
@@ -209,7 +207,7 @@ class TestNDArrayArrayFunction(object):
@requires_array_function
-class TestArrayFunctionDispatch(object):
+class TestArrayFunctionDispatch:
def test_pickle(self):
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
@@ -224,7 +222,7 @@ class TestArrayFunctionDispatch(object):
def test_interface(self):
- class MyArray(object):
+ class MyArray:
def __array_function__(self, func, types, args, kwargs):
return (self, func, types, args, kwargs)
@@ -239,7 +237,7 @@ class TestArrayFunctionDispatch(object):
def test_not_implemented(self):
- class MyArray(object):
+ class MyArray:
def __array_function__(self, func, types, args, kwargs):
return NotImplemented
@@ -249,7 +247,7 @@ class TestArrayFunctionDispatch(object):
@requires_array_function
-class TestVerifyMatchingSignatures(object):
+class TestVerifyMatchingSignatures:
def test_verify_matching_signatures(self):
@@ -283,7 +281,7 @@ def _new_duck_type_and_implements():
"""Create a duck array type and implements functions."""
HANDLED_FUNCTIONS = {}
- class MyArray(object):
+ class MyArray:
def __array_function__(self, func, types, args, kwargs):
if func not in HANDLED_FUNCTIONS:
return NotImplemented
@@ -302,7 +300,7 @@ def _new_duck_type_and_implements():
@requires_array_function
-class TestArrayFunctionImplementation(object):
+class TestArrayFunctionImplementation:
def test_one_arg(self):
MyArray, implements = _new_duck_type_and_implements()
@@ -355,7 +353,7 @@ class TestArrayFunctionImplementation(object):
func(MyArray())
-class TestNDArrayMethods(object):
+class TestNDArrayMethods:
def test_repr(self):
# gh-12162: should still be defined even if __array_function__ doesn't
@@ -370,7 +368,7 @@ class TestNDArrayMethods(object):
assert_equal(str(array), '1')
-class TestNumPyFunctions(object):
+class TestNumPyFunctions:
def test_set_module(self):
assert_equal(np.sum.__module__, 'numpy')
diff --git a/numpy/core/tests/test_print.py b/numpy/core/tests/test_print.py
index c5c091e13..89a8b48bf 100644
--- a/numpy/core/tests/test_print.py
+++ b/numpy/core/tests/test_print.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import sys
import pytest
@@ -9,10 +7,7 @@ from numpy.testing import assert_, assert_equal
from numpy.core.tests._locales import CommaDecimalPointLocale
-if sys.version_info[0] >= 3:
- from io import StringIO
-else:
- from StringIO import StringIO
+from io import StringIO
_REF = {np.inf: 'inf', -np.inf: '-inf', np.nan: 'nan'}
diff --git a/numpy/core/tests/test_protocols.py b/numpy/core/tests/test_protocols.py
new file mode 100644
index 000000000..55a2bcf72
--- /dev/null
+++ b/numpy/core/tests/test_protocols.py
@@ -0,0 +1,44 @@
+import pytest
+import warnings
+import numpy as np
+
+
+@pytest.mark.filterwarnings("error")
+def test_getattr_warning():
+ # issue gh-14735: make sure we clear only getattr errors, and let warnings
+ # through
+ class Wrapper:
+ def __init__(self, array):
+ self.array = array
+
+ def __len__(self):
+ return len(self.array)
+
+ def __getitem__(self, item):
+ return type(self)(self.array[item])
+
+ def __getattr__(self, name):
+ if name.startswith("__array_"):
+ warnings.warn("object got converted", UserWarning, stacklevel=1)
+
+ return getattr(self.array, name)
+
+ def __repr__(self):
+ return "<Wrapper({self.array})>".format(self=self)
+
+ array = Wrapper(np.arange(10))
+ with pytest.raises(UserWarning, match="object got converted"):
+ np.asarray(array)
+
+
+def test_array_called():
+ class Wrapper:
+ val = '0' * 100
+ def __array__(self, result=None):
+ return np.array([self.val], dtype=object)
+
+
+ wrapped = Wrapper()
+ arr = np.array(wrapped, dtype=str)
+ assert arr.dtype == 'U100'
+ assert arr[0] == Wrapper.val
diff --git a/numpy/core/tests/test_records.py b/numpy/core/tests/test_records.py
index c1b794145..4350a3407 100644
--- a/numpy/core/tests/test_records.py
+++ b/numpy/core/tests/test_records.py
@@ -1,37 +1,25 @@
-from __future__ import division, absolute_import, print_function
-
-import sys
-try:
- # Accessing collections abstract classes from collections
- # has been deprecated since Python 3.3
- import collections.abc as collections_abc
-except ImportError:
- import collections as collections_abc
+import collections.abc
import textwrap
from os import path
+from pathlib import Path
import pytest
import numpy as np
-from numpy.compat import Path
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_array_almost_equal,
- assert_raises, temppath
+ assert_raises, temppath,
)
from numpy.compat import pickle
-class TestFromrecords(object):
+class TestFromrecords:
def test_fromrecords(self):
r = np.rec.fromrecords([[456, 'dbe', 1.2], [2, 'de', 1.3]],
names='col1,col2,col3')
assert_equal(r[0].item(), (456, 'dbe', 1.2))
assert_equal(r['col1'].dtype.kind, 'i')
- if sys.version_info[0] >= 3:
- assert_equal(r['col2'].dtype.kind, 'U')
- assert_equal(r['col2'].dtype.itemsize, 12)
- else:
- assert_equal(r['col2'].dtype.kind, 'S')
- assert_equal(r['col2'].dtype.itemsize, 3)
+ assert_equal(r['col2'].dtype.kind, 'U')
+ assert_equal(r['col2'].dtype.itemsize, 12)
assert_equal(r['col3'].dtype.kind, 'f')
def test_fromrecords_0len(self):
@@ -258,7 +246,7 @@ class TestFromrecords(object):
assert_array_equal(ra['shape'], [['A', 'B', 'C']])
ra.field = 5
assert_array_equal(ra['field'], [[5, 5, 5]])
- assert_(isinstance(ra.field, collections_abc.Callable))
+ assert_(isinstance(ra.field, collections.abc.Callable))
def test_fromrecords_with_explicit_dtype(self):
a = np.rec.fromrecords([(1, 'a'), (2, 'bbb')],
@@ -325,8 +313,7 @@ class TestFromrecords(object):
assert_equal(rec['f1'], [b'', b'', b''])
-@pytest.mark.skipif(Path is None, reason="No pathlib.Path")
-class TestPathUsage(object):
+class TestPathUsage:
# Test that pathlib.Path can be used
def test_tofile_fromfile(self):
with temppath(suffix='.bin') as path:
@@ -342,7 +329,7 @@ class TestPathUsage(object):
assert_array_equal(x, a)
-class TestRecord(object):
+class TestRecord:
def setup(self):
self.data = np.rec.fromrecords([(1, 2, 3), (4, 5, 6)],
dtype=[("col1", "<i4"),
@@ -416,6 +403,22 @@ class TestRecord(object):
assert_(pa.flags.writeable)
assert_(pa.flags.aligned)
+ def test_pickle_void(self):
+ # issue gh-13593
+ dt = np.dtype([('obj', 'O'), ('int', 'i')])
+ a = np.empty(1, dtype=dt)
+ data = (bytearray(b'eman'),)
+ a['obj'] = data
+ a['int'] = 42
+ ctor, args = a[0].__reduce__()
+ # check the constructor is what we expect before interpreting the arguments
+ assert ctor is np.core.multiarray.scalar
+ dtype, obj = args
+ # make sure we did not pickle the address
+ assert not isinstance(obj, bytes)
+
+ assert_raises(TypeError, ctor, dtype, 13)
+
def test_objview_record(self):
# https://github.com/numpy/numpy/issues/2599
dt = np.dtype([('foo', 'i8'), ('bar', 'O')])
diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py
index 9dc231deb..4d7639e43 100644
--- a/numpy/core/tests/test_regression.py
+++ b/numpy/core/tests/test_regression.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import copy
import sys
import gc
@@ -16,14 +14,15 @@ from numpy.testing import (
assert_raises_regex, assert_warns, suppress_warnings,
_assert_valid_refcount, HAS_REFCOUNT,
)
-from numpy.compat import asbytes, asunicode, long, pickle
+from numpy.testing._private.utils import _no_tracing
+from numpy.compat import asbytes, asunicode, pickle
try:
RecursionError
except NameError:
RecursionError = RuntimeError # python < 3.5
-class TestRegression(object):
+class TestRegression:
def test_invalid_round(self):
# Ticket #3
v = 4.7599999999999998
@@ -37,11 +36,10 @@ class TestRegression(object):
# Ticket #16
a = np.transpose(np.array([[2, 9], [7, 0], [3, 8]]))
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
- f = BytesIO()
- pickle.dump(a, f, protocol=proto)
- f.seek(0)
- b = pickle.load(f)
- f.close()
+ with BytesIO() as f:
+ pickle.dump(a, f, protocol=proto)
+ f.seek(0)
+ b = pickle.load(f)
assert_array_equal(a, b)
def test_typeNA(self):
@@ -95,11 +93,10 @@ class TestRegression(object):
# Ticket #50
ca = np.char.array(np.arange(1000, 1010), itemsize=4)
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
- f = BytesIO()
- pickle.dump(ca, f, protocol=proto)
- f.seek(0)
- ca = np.load(f, allow_pickle=True)
- f.close()
+ with BytesIO() as f:
+ pickle.dump(ca, f, protocol=proto)
+ f.seek(0)
+ ca = np.load(f, allow_pickle=True)
def test_noncontiguous_fill(self):
# Ticket #58.
@@ -359,11 +356,10 @@ class TestRegression(object):
# Implemented in r2840
dt = np.dtype([('x', int), ('y', np.object_), ('z', 'O')])
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
- f = BytesIO()
- pickle.dump(dt, f, protocol=proto)
- f.seek(0)
- dt_ = pickle.load(f)
- f.close()
+ with BytesIO() as f:
+ pickle.dump(dt, f, protocol=proto)
+ f.seek(0)
+ dt_ = pickle.load(f)
assert_equal(dt, dt_)
def test_mem_array_creation_invalid_specification(self):
@@ -427,7 +423,7 @@ class TestRegression(object):
def test_lexsort_invalid_sequence(self):
# Issue gh-4123
- class BuggySequence(object):
+ class BuggySequence:
def __len__(self):
return 4
@@ -488,15 +484,13 @@ class TestRegression(object):
b"bI00\nS'O\\x81\\xb7Z\\xaa:\\xabY'\np22\ntp23\nb."),
]
- if sys.version_info[:2] >= (3, 4):
- # encoding='bytes' was added in Py3.4
- for original, data in test_data:
- result = pickle.loads(data, encoding='bytes')
- assert_equal(result, original)
+ for original, data in test_data:
+ result = pickle.loads(data, encoding='bytes')
+ assert_equal(result, original)
- if isinstance(result, np.ndarray) and result.dtype.names is not None:
- for name in result.dtype.names:
- assert_(isinstance(name, str))
+ if isinstance(result, np.ndarray) and result.dtype.names is not None:
+ for name in result.dtype.names:
+ assert_(isinstance(name, str))
def test_pickle_dtype(self):
# Ticket #251
@@ -1040,7 +1034,7 @@ class TestRegression(object):
def test_mem_custom_float_to_array(self):
# Ticket 702
- class MyFloat(object):
+ class MyFloat:
def __float__(self):
return 1.0
@@ -1049,7 +1043,7 @@ class TestRegression(object):
def test_object_array_refcount_self_assign(self):
# Ticket #711
- class VictimObject(object):
+ class VictimObject:
deleted = False
def __del__(self):
@@ -1107,14 +1101,8 @@ class TestRegression(object):
# The dtype is float64, but the isbuiltin attribute is 0.
data_dir = path.join(path.dirname(__file__), 'data')
filename = path.join(data_dir, "astype_copy.pkl")
- if sys.version_info[0] >= 3:
- f = open(filename, 'rb')
+ with open(filename, 'rb') as f:
xp = pickle.load(f, encoding='latin1')
- f.close()
- else:
- f = open(filename)
- xp = pickle.load(f)
- f.close()
xpd = xp.astype(np.float64)
assert_((xp.__array_interface__['data'][0] !=
xpd.__array_interface__['data'][0]))
@@ -1231,10 +1219,7 @@ class TestRegression(object):
msg = 'unicode offset: %d chars' % i
t = np.dtype([('a', 'S%d' % i), ('b', 'U2')])
x = np.array([(b'a', u'b')], dtype=t)
- if sys.version_info[0] >= 3:
- assert_equal(str(x), "[(b'a', 'b')]", err_msg=msg)
- else:
- assert_equal(str(x), "[('a', u'b')]", err_msg=msg)
+ assert_equal(str(x), "[(b'a', 'b')]", err_msg=msg)
def test_sign_for_complex_nan(self):
# Ticket 794.
@@ -1316,6 +1301,7 @@ class TestRegression(object):
assert_(pickle.loads(
pickle.dumps(test_record, protocol=proto)) == test_record)
+ @_no_tracing
def test_blasdot_uninitialized_memory(self):
# Ticket #950
for m in [0, 1, 2]:
@@ -1365,13 +1351,13 @@ class TestRegression(object):
def test_array_from_sequence_scalar_array(self):
# Ticket #1078: segfaults when creating an array with a sequence of
# 0d arrays.
- a = np.array((np.ones(2), np.array(2)))
+ a = np.array((np.ones(2), np.array(2)), dtype=object)
assert_equal(a.shape, (2,))
assert_equal(a.dtype, np.dtype(object))
assert_equal(a[0], np.ones(2))
assert_equal(a[1], np.array(2))
- a = np.array(((1,), np.array(1)))
+ a = np.array(((1,), np.array(1)), dtype=object)
assert_equal(a.shape, (2,))
assert_equal(a.dtype, np.dtype(object))
assert_equal(a[0], (1,))
@@ -1379,7 +1365,7 @@ class TestRegression(object):
def test_array_from_sequence_scalar_array2(self):
# Ticket #1081: weird array with strange input...
- t = np.array([np.array([]), np.array(0, object)])
+ t = np.array([np.array([]), np.array(0, object)], dtype=object)
assert_equal(t.shape, (2,))
assert_equal(t.dtype, np.dtype(object))
@@ -1422,6 +1408,13 @@ class TestRegression(object):
dtype='U')
assert_raises(UnicodeEncodeError, np.array, a, 'S4')
+ def test_unicode_to_string_cast_error(self):
+ # gh-15790
+ a = np.array([u'\x80'] * 129, dtype='U3')
+ assert_raises(UnicodeEncodeError, np.array, a, 'S')
+ b = a.reshape(3, 43)[:-1, :-1]
+ assert_raises(UnicodeEncodeError, np.array, b, 'S')
+
def test_mixed_string_unicode_array_creation(self):
a = np.array(['1234', u'123'])
assert_(a.itemsize == 16)
@@ -1511,14 +1504,11 @@ class TestRegression(object):
min //= -1
with np.errstate(divide="ignore"):
- for t in (np.int8, np.int16, np.int32, np.int64, int, np.long):
+ for t in (np.int8, np.int16, np.int32, np.int64, int):
test_type(t)
def test_buffer_hashlib(self):
- try:
- from hashlib import md5
- except ImportError:
- from md5 import new as md5
+ from hashlib import md5
x = np.array([1, 2, 3], dtype=np.dtype('<i4'))
assert_equal(md5(x).hexdigest(), '2a1dd1e1e59d0a384c26951e316cd7e6')
@@ -1813,13 +1803,7 @@ class TestRegression(object):
a = np.array(0, dtype=object)
a[()] = a
assert_raises(RecursionError, int, a)
- assert_raises(RecursionError, long, a)
assert_raises(RecursionError, float, a)
- if sys.version_info.major == 2:
- # in python 3, this falls back on operator.index, which fails on
- # on dtype=object
- assert_raises(RecursionError, oct, a)
- assert_raises(RecursionError, hex, a)
a[()] = None
def test_object_array_circular_reference(self):
@@ -1844,13 +1828,7 @@ class TestRegression(object):
b = np.array(0, dtype=object)
a[()] = b
assert_equal(int(a), int(0))
- assert_equal(long(a), long(0))
assert_equal(float(a), float(0))
- if sys.version_info.major == 2:
- # in python 3, this falls back on operator.index, which fails on
- # on dtype=object
- assert_equal(oct(a), oct(0))
- assert_equal(hex(a), hex(0))
def test_object_array_self_copy(self):
# An object array being copied into itself DECREF'ed before INCREF'ing
@@ -1954,13 +1932,12 @@ class TestRegression(object):
assert_equal(s[0], "\x01")
def test_pickle_bytes_overwrite(self):
- if sys.version_info[0] >= 3:
- for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
- data = np.array([1], dtype='b')
- data = pickle.loads(pickle.dumps(data, protocol=proto))
- data[0] = 0xdd
- bytestring = "\x01 ".encode('ascii')
- assert_equal(bytestring[0:1], '\x01'.encode('ascii'))
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ data = np.array([1], dtype='b')
+ data = pickle.loads(pickle.dumps(data, protocol=proto))
+ data[0] = 0xdd
+ bytestring = "\x01 ".encode('ascii')
+ assert_equal(bytestring[0:1], '\x01'.encode('ascii'))
def test_pickle_py2_array_latin1_hack(self):
# Check that unpickling hacks in Py3 that support
@@ -1971,12 +1948,11 @@ class TestRegression(object):
b"tp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'i1'\np8\n"
b"I0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nNNNI-1\nI-1\nI0\ntp12\nbI00\nS'\\x81'\n"
b"p13\ntp14\nb.")
- if sys.version_info[0] >= 3:
- # This should work:
- result = pickle.loads(data, encoding='latin1')
- assert_array_equal(result, np.array([129], dtype='b'))
- # Should not segfault:
- assert_raises(Exception, pickle.loads, data, encoding='koi8-r')
+ # This should work:
+ result = pickle.loads(data, encoding='latin1')
+ assert_array_equal(result, np.array([129], dtype='b'))
+ # Should not segfault:
+ assert_raises(Exception, pickle.loads, data, encoding='koi8-r')
def test_pickle_py2_scalar_latin1_hack(self):
# Check that scalar unpickling hack in Py3 that supports
@@ -2003,25 +1979,24 @@ class TestRegression(object):
b"tp8\nRp9\n."),
'different'),
]
- if sys.version_info[0] >= 3:
- for original, data, koi8r_validity in datas:
- result = pickle.loads(data, encoding='latin1')
- assert_equal(result, original)
-
- # Decoding under non-latin1 encoding (e.g.) KOI8-R can
- # produce bad results, but should not segfault.
- if koi8r_validity == 'different':
- # Unicode code points happen to lie within latin1,
- # but are different in koi8-r, resulting to silent
- # bogus results
- result = pickle.loads(data, encoding='koi8-r')
- assert_(result != original)
- elif koi8r_validity == 'invalid':
- # Unicode code points outside latin1, so results
- # to an encoding exception
- assert_raises(ValueError, pickle.loads, data, encoding='koi8-r')
- else:
- raise ValueError(koi8r_validity)
+ for original, data, koi8r_validity in datas:
+ result = pickle.loads(data, encoding='latin1')
+ assert_equal(result, original)
+
+ # Decoding under non-latin1 encoding (e.g.) KOI8-R can
+ # produce bad results, but should not segfault.
+ if koi8r_validity == 'different':
+ # Unicode code points happen to lie within latin1,
+ # but are different in koi8-r, resulting to silent
+ # bogus results
+ result = pickle.loads(data, encoding='koi8-r')
+ assert_(result != original)
+ elif koi8r_validity == 'invalid':
+ # Unicode code points outside latin1, so results
+ # to an encoding exception
+ assert_raises(ValueError, pickle.loads, data, encoding='koi8-r')
+ else:
+ raise ValueError(koi8r_validity)
def test_structured_type_to_object(self):
a_rec = np.array([(0, 1), (3, 2)], dtype='i4,i8')
@@ -2094,10 +2069,7 @@ class TestRegression(object):
# Ticket #2081. Python compiled with two byte unicode
# can lead to truncation if itemsize is not properly
# adjusted for NumPy's four byte unicode.
- if sys.version_info[0] >= 3:
- a = np.array(['abcd'])
- else:
- a = np.array([u'abcd'])
+ a = np.array(['abcd'])
assert_equal(a.dtype.itemsize, 16)
def test_unique_stable(self):
@@ -2112,7 +2084,7 @@ class TestRegression(object):
# Ticket #1578, the mismatch only showed up when running
# python-debug for python versions >= 2.7, and then as
# a core dump and error message.
- a = np.array(['abc'], dtype=np.unicode)[0]
+ a = np.array(['abc'], dtype=np.unicode_)[0]
del a
def test_refcount_error_in_clip(self):
@@ -2231,7 +2203,7 @@ class TestRegression(object):
import operator as op
# dummy class where __array__ throws exception
- class Foo(object):
+ class Foo:
__array_priority__ = 1002
def __array__(self, *args, **kwargs):
@@ -2240,12 +2212,7 @@ class TestRegression(object):
rhs = Foo()
lhs = np.array(1)
for f in [op.lt, op.le, op.gt, op.ge]:
- if sys.version_info[0] >= 3:
- assert_raises(TypeError, f, lhs, rhs)
- elif not sys.py3kwarning:
- # With -3 switch in python 2, DeprecationWarning is raised
- # which we are not interested in
- f(lhs, rhs)
+ assert_raises(TypeError, f, lhs, rhs)
assert_(not op.eq(lhs, rhs))
assert_(op.ne(lhs, rhs))
@@ -2288,9 +2255,10 @@ class TestRegression(object):
x[0], x[-1] = x[-1], x[0]
uf = np.frompyfunc(f, 1, 0)
- a = np.array([[1, 2, 3], [4, 5], [6, 7, 8, 9]])
+ a = np.array([[1, 2, 3], [4, 5], [6, 7, 8, 9]], dtype=object)
assert_equal(uf(a), ())
- assert_array_equal(a, [[3, 2, 1], [5, 4], [9, 7, 8, 6]])
+ expected = np.array([[3, 2, 1], [5, 4], [9, 7, 8, 6]], dtype=object)
+ assert_array_equal(a, expected)
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_leak_in_structured_dtype_comparison(self):
@@ -2465,8 +2433,11 @@ class TestRegression(object):
x = np.array([1, 2, 4, 7, 0], dtype=np.int16)
res = np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99]))
assert_equal(res, [-99, 1, 2, 3, -7, 88, 99])
- assert_raises(ValueError, np.ediff1d, x, to_begin=(1<<20))
- assert_raises(ValueError, np.ediff1d, x, to_end=(1<<20))
+
+    # The use of safe casting means that 1<<20 is cast unsafely, an
+ # error may be better, but currently there is no mechanism for it.
+ res = np.ediff1d(x, to_begin=(1<<20), to_end=(1<<20))
+ assert_equal(res, [0, 1, 2, 3, -7, 0])
def test_pickle_datetime64_array(self):
# gh-12745 (would fail with pickle5 installed)
@@ -2477,7 +2448,7 @@ class TestRegression(object):
assert_equal(pickle.loads(dumped), arr)
def test_bad_array_interface(self):
- class T(object):
+ class T:
__array_interface__ = {}
np.array([T()])
@@ -2499,8 +2470,9 @@ class TestRegression(object):
t = T()
- #gh-13659, would raise in broadcasting [x=t for x in result]
- np.array([t])
+ # gh-13659, would raise in broadcasting [x=t for x in result]
+ arr = np.array([t])
+ assert arr.shape == (1, 0, 0)
@pytest.mark.skipif(sys.maxsize < 2 ** 31 + 1, reason='overflows 32-bit python')
@pytest.mark.skipif(sys.platform == 'win32' and sys.version_info[:2] < (3, 8),
diff --git a/numpy/core/tests/test_scalar_ctors.py b/numpy/core/tests/test_scalar_ctors.py
index b21bc9dad..7645a0853 100644
--- a/numpy/core/tests/test_scalar_ctors.py
+++ b/numpy/core/tests/test_scalar_ctors.py
@@ -1,18 +1,14 @@
"""
Test the scalar constructors, which also do type-coercion
"""
-from __future__ import division, absolute_import, print_function
-
-import sys
-import platform
import pytest
import numpy as np
from numpy.testing import (
- assert_equal, assert_almost_equal, assert_raises, assert_warns,
+ assert_equal, assert_almost_equal, assert_warns,
)
-class TestFromString(object):
+class TestFromString:
def test_floating(self):
# Ticket #640, floats from string
fsingle = np.single('1.234')
@@ -42,21 +38,41 @@ class TestFromString(object):
flongdouble = assert_warns(RuntimeWarning, np.longdouble, '-1e10000')
assert_equal(flongdouble, -np.inf)
- @pytest.mark.skipif((sys.version_info[0] >= 3)
- or (sys.platform == "win32"
- and platform.architecture()[0] == "64bit"),
- reason="numpy.intp('0xff', 16) not supported on Py3 "
- "or 64 bit Windows")
- def test_intp(self):
- # Ticket #99
- i_width = np.int_(0).nbytes*2 - 1
- np.intp('0x' + 'f'*i_width, 16)
- assert_raises(OverflowError, np.intp, '0x' + 'f'*(i_width+1), 16)
- assert_raises(ValueError, np.intp, '0x1', 32)
- assert_equal(255, np.intp('0xFF', 16))
+
+class TestExtraArgs:
+ def test_superclass(self):
+ # try both positional and keyword arguments
+ s = np.str_(b'\\x61', encoding='unicode-escape')
+ assert s == 'a'
+ s = np.str_(b'\\x61', 'unicode-escape')
+ assert s == 'a'
+
+ # previously this would return '\\xx'
+ with pytest.raises(UnicodeDecodeError):
+ np.str_(b'\\xx', encoding='unicode-escape')
+ with pytest.raises(UnicodeDecodeError):
+ np.str_(b'\\xx', 'unicode-escape')
+
+ # superclass fails, but numpy succeeds
+ assert np.bytes_(-2) == b'-2'
+
+ def test_datetime(self):
+ dt = np.datetime64('2000-01', ('M', 2))
+ assert np.datetime_data(dt) == ('M', 2)
+
+ with pytest.raises(TypeError):
+ np.datetime64('2000', garbage=True)
+
+ def test_bool(self):
+ with pytest.raises(TypeError):
+ np.bool(False, garbage=True)
+
+ def test_void(self):
+ with pytest.raises(TypeError):
+ np.void(b'test', garbage=True)
-class TestFromInt(object):
+class TestFromInt:
def test_intp(self):
# Ticket #99
assert_equal(1024, np.intp(1024))
diff --git a/numpy/core/tests/test_scalar_methods.py b/numpy/core/tests/test_scalar_methods.py
index 93434dd1b..4f5fd2988 100644
--- a/numpy/core/tests/test_scalar_methods.py
+++ b/numpy/core/tests/test_scalar_methods.py
@@ -1,22 +1,16 @@
"""
Test the scalar constructors, which also do type-coercion
"""
-from __future__ import division, absolute_import, print_function
-
-import os
import fractions
import platform
import pytest
import numpy as np
-from numpy.testing import (
- run_module_suite,
- assert_equal, assert_almost_equal, assert_raises, assert_warns,
- dec
-)
+from numpy.testing import assert_equal, assert_raises
+
-class TestAsIntegerRatio(object):
+class TestAsIntegerRatio:
# derived in part from the cpython test "test_floatasratio"
@pytest.mark.parametrize("ftype", [
diff --git a/numpy/core/tests/test_scalarbuffer.py b/numpy/core/tests/test_scalarbuffer.py
index 3ded7eecd..b1c1bbbb1 100644
--- a/numpy/core/tests/test_scalarbuffer.py
+++ b/numpy/core/tests/test_scalarbuffer.py
@@ -1,7 +1,6 @@
"""
Test scalar buffer interface adheres to PEP 3118
"""
-import sys
import numpy as np
import pytest
@@ -31,9 +30,7 @@ scalars_and_codes = [
scalars_only, codes_only = zip(*scalars_and_codes)
-@pytest.mark.skipif(sys.version_info.major < 3,
- reason="Python 2 scalars lack a buffer interface")
-class TestScalarPEP3118(object):
+class TestScalarPEP3118:
@pytest.mark.parametrize('scalar', scalars_only, ids=codes_only)
def test_scalar_match_array(self, scalar):
@@ -79,27 +76,44 @@ class TestScalarPEP3118(object):
assert_equal(mv_x.itemsize, mv_a.itemsize)
assert_equal(mv_x.format, mv_a.format)
+ def _as_dict(self, m):
+ return dict(strides=m.strides, shape=m.shape, itemsize=m.itemsize,
+ ndim=m.ndim, format=m.format)
+
def test_datetime_memoryview(self):
# gh-11656
# Values verified with v1.13.3, shape is not () as in test_scalar_dim
- def as_dict(m):
- return dict(strides=m.strides, shape=m.shape, itemsize=m.itemsize,
- ndim=m.ndim, format=m.format)
dt1 = np.datetime64('2016-01-01')
dt2 = np.datetime64('2017-01-01')
- expected = {'strides': (1,), 'itemsize': 1, 'ndim': 1,
- 'shape': (8,), 'format': 'B'}
+ expected = dict(strides=(1,), itemsize=1, ndim=1, shape=(8,),
+ format='B')
v = memoryview(dt1)
- res = as_dict(v)
- assert_equal(res, expected)
+ assert self._as_dict(v) == expected
v = memoryview(dt2 - dt1)
- res = as_dict(v)
- assert_equal(res, expected)
+ assert self._as_dict(v) == expected
dt = np.dtype([('a', 'uint16'), ('b', 'M8[s]')])
a = np.empty(1, dt)
# Fails to create a PEP 3118 valid buffer
assert_raises((ValueError, BufferError), memoryview, a[0])
+ @pytest.mark.parametrize('s', [
+ pytest.param("\x32\x32", id="ascii"),
+ pytest.param("\uFE0F\uFE0F", id="basic multilingual"),
+ pytest.param("\U0001f4bb\U0001f4bb", id="non-BMP"),
+ ])
+ def test_str_ucs4(self, s):
+ s = np.str_(s) # only our subclass implements the buffer protocol
+
+ # all the same, characters always encode as ucs4
+ expected = dict(strides=(), itemsize=8, ndim=0, shape=(), format='2w')
+
+ v = memoryview(s)
+ assert self._as_dict(v) == expected
+
+    # integers of the platform-appropriate endianness
+ code_points = np.frombuffer(v, dtype='i4')
+
+ assert_equal(code_points, [ord(c) for c in s])
diff --git a/numpy/core/tests/test_scalarinherit.py b/numpy/core/tests/test_scalarinherit.py
index 9e32cf624..74829986c 100644
--- a/numpy/core/tests/test_scalarinherit.py
+++ b/numpy/core/tests/test_scalarinherit.py
@@ -2,13 +2,13 @@
""" Test printing of scalar types.
"""
-from __future__ import division, absolute_import, print_function
+import pytest
import numpy as np
from numpy.testing import assert_
-class A(object):
+class A:
pass
class B(A, np.float64):
pass
@@ -23,7 +23,15 @@ class B0(np.float64, A):
class C0(B0):
pass
-class TestInherit(object):
+class HasNew:
+ def __new__(cls, *args, **kwargs):
+ return cls, args, kwargs
+
+class B1(np.float64, HasNew):
+ pass
+
+
+class TestInherit:
def test_init(self):
x = B(1.0)
assert_(str(x) == '1.0')
@@ -38,8 +46,17 @@ class TestInherit(object):
y = C0(2.0)
assert_(str(y) == '2.0')
+ def test_gh_15395(self):
+ # HasNew is the second base, so `np.float64` should have priority
+ x = B1(1.0)
+ assert_(str(x) == '1.0')
+
+ # previously caused RecursionError!?
+ with pytest.raises(TypeError):
+ B1(1.0, 2.0)
+
-class TestCharacter(object):
+class TestCharacter:
def test_char_radd(self):
# GH issue 9620, reached gentype_add and raise TypeError
np_s = np.string_('abc')
@@ -68,8 +85,7 @@ class TestCharacter(object):
def test_char_repeat(self):
np_s = np.string_('abc')
np_u = np.unicode_('abc')
- np_i = np.int(5)
res_s = b'abc' * 5
res_u = u'abc' * 5
- assert_(np_s * np_i == res_s)
- assert_(np_u * np_i == res_u)
+ assert_(np_s * 5 == res_s)
+ assert_(np_u * 5 == res_u)
diff --git a/numpy/core/tests/test_scalarmath.py b/numpy/core/tests/test_scalarmath.py
index 854df5590..c7f44cf50 100644
--- a/numpy/core/tests/test_scalarmath.py
+++ b/numpy/core/tests/test_scalarmath.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import sys
import warnings
import itertools
@@ -11,7 +9,7 @@ import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_almost_equal,
assert_array_equal, IS_PYPY, suppress_warnings, _gen_alignment_data,
- assert_warns
+ assert_warns, assert_raises_regex,
)
types = [np.bool_, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc,
@@ -25,7 +23,7 @@ complex_floating_types = np.complexfloating.__subclasses__()
# This compares scalarmath against ufuncs.
-class TestTypes(object):
+class TestTypes:
def test_types(self):
for atype in types:
a = atype(1)
@@ -64,7 +62,7 @@ class TestTypes(object):
np.add(1, 1)
-class TestBaseMath(object):
+class TestBaseMath:
def test_blocked(self):
# test alignments offsets for simd instructions
# alignments for vz + 2 * (vs - 1) + 1
@@ -86,7 +84,7 @@ class TestBaseMath(object):
assert_almost_equal(np.square(inp2),
np.multiply(inp2, inp2), err_msg=msg)
# skip true divide for ints
- if dt != np.int32 or (sys.version_info.major < 3 and not sys.py3kwarning):
+ if dt != np.int32:
assert_almost_equal(np.reciprocal(inp2),
np.divide(1, inp2), err_msg=msg)
@@ -110,7 +108,7 @@ class TestBaseMath(object):
np.add(d, np.ones_like(d))
-class TestPower(object):
+class TestPower:
def test_small_types(self):
for t in [np.int8, np.int16, np.float16]:
a = t(3)
@@ -202,7 +200,7 @@ def _signs(dt):
return (+1, -1)
-class TestModulus(object):
+class TestModulus:
def test_modulus_basic(self):
dt = np.typecodes['AllInteger'] + np.typecodes['Float']
@@ -293,8 +291,18 @@ class TestModulus(object):
rem = operator.mod(finf, fone)
assert_(np.isnan(rem), 'dt: %s' % dt)
+ def test_inplace_floordiv_handling(self):
+ # issue gh-12927
+ # this only applies to in-place floordiv //=, because the output type
+ # promotes to float which does not fit
+ a = np.array([1, 2], np.int64)
+ b = np.array([1, 2], np.uint64)
+ pattern = 'could not be coerced to provided output parameter'
+ with assert_raises_regex(TypeError, pattern):
+ a //= b
+
-class TestComplexDivision(object):
+class TestComplexDivision:
def test_zero_division(self):
with np.errstate(all="ignore"):
for t in [np.complex64, np.complex128]:
@@ -366,7 +374,7 @@ class TestComplexDivision(object):
assert_equal(result.imag, ex[1])
-class TestConversion(object):
+class TestConversion:
def test_int_from_long(self):
l = [1e6, 1e12, 1e18, -1e6, -1e12, -1e18]
li = [10**6, 10**12, 10**18, -10**6, -10**12, -10**18]
@@ -502,7 +510,7 @@ class TestConversion(object):
assert_(np.equal(np.datetime64('NaT'), None))
-#class TestRepr(object):
+#class TestRepr:
# def test_repr(self):
# for t in types:
# val = t(1197346475.0137341)
@@ -511,7 +519,7 @@ class TestConversion(object):
# assert_equal( val, val2 )
-class TestRepr(object):
+class TestRepr:
def _test_type_repr(self, t):
finfo = np.finfo(t)
last_fraction_bit_idx = finfo.nexp + finfo.nmant
@@ -546,7 +554,7 @@ class TestRepr(object):
if not IS_PYPY:
# sys.getsizeof() is not valid on PyPy
- class TestSizeOf(object):
+ class TestSizeOf:
def test_equal_nbytes(self):
for type in types:
@@ -558,7 +566,7 @@ if not IS_PYPY:
assert_raises(TypeError, d.__sizeof__, "a")
-class TestMultiply(object):
+class TestMultiply:
def test_seq_repeat(self):
# Test that basic sequences get repeated when multiplied with
# numpy integers. And errors are raised when multiplied with others.
@@ -595,7 +603,7 @@ class TestMultiply(object):
# Test that an array-like which does not know how to be multiplied
# does not attempt sequence repeat (raise TypeError).
# See also gh-7428.
- class ArrayLike(object):
+ class ArrayLike:
def __init__(self, arr):
self.arr = arr
def __array__(self):
@@ -609,7 +617,7 @@ class TestMultiply(object):
assert_array_equal(np.int_(3) * arr_like, np.full(3, 3))
-class TestNegative(object):
+class TestNegative:
def test_exceptions(self):
a = np.ones((), dtype=np.bool_)[()]
assert_raises(TypeError, operator.neg, a)
@@ -623,7 +631,7 @@ class TestNegative(object):
assert_equal(operator.neg(a) + a, 0)
-class TestSubtract(object):
+class TestSubtract:
def test_exceptions(self):
a = np.ones((), dtype=np.bool_)[()]
assert_raises(TypeError, operator.sub, a, a)
@@ -637,7 +645,7 @@ class TestSubtract(object):
assert_equal(operator.sub(a, a), 0)
-class TestAbs(object):
+class TestAbs:
def _test_abs_func(self, absfunc):
for tp in floating_types + complex_floating_types:
x = tp(-1.5)
@@ -666,7 +674,7 @@ class TestAbs(object):
self._test_abs_func(np.abs)
-class TestBitShifts(object):
+class TestBitShifts:
@pytest.mark.parametrize('type_code', np.typecodes['AllInteger'])
@pytest.mark.parametrize('op',
diff --git a/numpy/core/tests/test_scalarprint.py b/numpy/core/tests/test_scalarprint.py
index 86b0ca199..6502ec4c1 100644
--- a/numpy/core/tests/test_scalarprint.py
+++ b/numpy/core/tests/test_scalarprint.py
@@ -2,17 +2,16 @@
""" Test printing of scalar types.
"""
-from __future__ import division, absolute_import, print_function
-
-import code, sys
+import code
import platform
import pytest
+import sys
from tempfile import TemporaryFile
import numpy as np
-from numpy.testing import assert_, assert_equal, suppress_warnings
+from numpy.testing import assert_, assert_equal
-class TestRealScalars(object):
+class TestRealScalars:
def test_str(self):
svals = [0.0, -0.0, 1, -1, np.inf, -np.inf, np.nan]
styps = [np.float16, np.float32, np.float64, np.longdouble]
@@ -32,12 +31,12 @@ class TestRealScalars(object):
def test_scalar_cutoffs(self):
# test that both the str and repr of np.float64 behaves
- # like python floats in python3. Note that in python2
- # the str has truncated digits, but we do not do this
+ # like python floats in python3.
def check(v):
- # we compare str to repr, to avoid python2 truncation behavior
+ assert_equal(str(np.float64(v)), str(v))
assert_equal(str(np.float64(v)), repr(v))
assert_equal(repr(np.float64(v)), repr(v))
+ assert_equal(repr(np.float64(v)), str(v))
# check we use the same number of significant digits
check(1.12345678901234567890)
@@ -84,10 +83,7 @@ class TestRealScalars(object):
orig_stdout, orig_stderr = sys.stdout, sys.stderr
sys.stdout, sys.stderr = fo, fe
- # py2 code.interact sends irrelevant internal DeprecationWarnings
- with suppress_warnings() as sup:
- sup.filter(DeprecationWarning)
- code.interact(local={'np': np}, readfunc=input_func, banner='')
+ code.interact(local={'np': np}, readfunc=input_func, banner='')
sys.stdout, sys.stderr = orig_stdout, orig_stderr
diff --git a/numpy/core/tests/test_shape_base.py b/numpy/core/tests/test_shape_base.py
index 53d272fc5..546ecf001 100644
--- a/numpy/core/tests/test_shape_base.py
+++ b/numpy/core/tests/test_shape_base.py
@@ -1,7 +1,4 @@
-from __future__ import division, absolute_import, print_function
-
import pytest
-import sys
import numpy as np
from numpy.core import (
array, arange, atleast_1d, atleast_2d, atleast_3d, block, vstack, hstack,
@@ -14,9 +11,8 @@ from numpy.testing import (
assert_raises_regex, assert_warns
)
-from numpy.compat import long
-class TestAtleast1d(object):
+class TestAtleast1d:
def test_0D_array(self):
a = array(1)
b = array(2)
@@ -52,12 +48,11 @@ class TestAtleast1d(object):
"""
assert_(atleast_1d(3).shape == (1,))
assert_(atleast_1d(3j).shape == (1,))
- assert_(atleast_1d(long(3)).shape == (1,))
assert_(atleast_1d(3.0).shape == (1,))
assert_(atleast_1d([[2, 3], [4, 5]]).shape == (2, 2))
-class TestAtleast2d(object):
+class TestAtleast2d:
def test_0D_array(self):
a = array(1)
b = array(2)
@@ -96,7 +91,7 @@ class TestAtleast2d(object):
assert_(atleast_2d([[[3, 1], [4, 5]], [[3, 5], [1, 2]]]).shape == (2, 2, 2))
-class TestAtleast3d(object):
+class TestAtleast3d:
def test_0D_array(self):
a = array(1)
b = array(2)
@@ -128,7 +123,7 @@ class TestAtleast3d(object):
assert_array_equal(res, desired)
-class TestHstack(object):
+class TestHstack:
def test_non_iterable(self):
assert_raises(TypeError, hstack, 1)
@@ -159,13 +154,11 @@ class TestHstack(object):
def test_generator(self):
with assert_warns(FutureWarning):
hstack((np.arange(3) for _ in range(2)))
- if sys.version_info.major > 2:
- # map returns a list on Python 2
- with assert_warns(FutureWarning):
- hstack(map(lambda x: x, np.ones((3, 2))))
+ with assert_warns(FutureWarning):
+ hstack(map(lambda x: x, np.ones((3, 2))))
-class TestVstack(object):
+class TestVstack:
def test_non_iterable(self):
assert_raises(TypeError, vstack, 1)
@@ -205,7 +198,7 @@ class TestVstack(object):
vstack((np.arange(3) for _ in range(2)))
-class TestConcatenate(object):
+class TestConcatenate:
def test_returns_copy(self):
a = np.eye(3)
b = np.concatenate([a])
@@ -409,7 +402,7 @@ def test_stack():
assert_array_equal(result, np.array([0, 1, 2]))
-class TestBlock(object):
+class TestBlock:
@pytest.fixture(params=['block', 'force_concatenate', 'force_slicing'])
def block(self, request):
# blocking small arrays and large arrays go through different paths.
@@ -707,7 +700,7 @@ class TestBlock(object):
def test_block_dispatcher():
- class ArrayLike(object):
+ class ArrayLike:
pass
a = ArrayLike()
b = ArrayLike()
diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py
index 707c690dd..abdaeeb93 100644
--- a/numpy/core/tests/test_ufunc.py
+++ b/numpy/core/tests/test_ufunc.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import warnings
import itertools
@@ -18,7 +16,12 @@ from numpy.testing import (
from numpy.compat import pickle
-class TestUfuncKwargs(object):
+UNARY_UFUNCS = [obj for obj in np.core.umath.__dict__.values()
+ if isinstance(obj, np.ufunc)]
+UNARY_OBJECT_UFUNCS = [uf for uf in UNARY_UFUNCS if "O->O" in uf.types]
+
+
+class TestUfuncKwargs:
def test_kwarg_exact(self):
assert_raises(TypeError, np.add, 1, 2, castingx='safe')
assert_raises(TypeError, np.add, 1, 2, dtypex=int)
@@ -44,7 +47,7 @@ class TestUfuncKwargs(object):
assert_raises(TypeError, np.add, 1, 2, extobj=[4096], parrot=True)
-class TestUfuncGenericLoops(object):
+class TestUfuncGenericLoops:
"""Test generic loops.
The loops to be tested are:
@@ -113,7 +116,7 @@ class TestUfuncGenericLoops(object):
assert_equal(ys.dtype, output_dtype)
# class to use in testing object method loops
- class foo(object):
+ class foo:
def conjugate(self):
return np.bool_(1)
@@ -124,7 +127,7 @@ class TestUfuncGenericLoops(object):
x = np.ones(10, dtype=object)
assert_(np.all(np.abs(x) == 1))
- def test_unary_PyUFunc_O_O_method(self, foo=foo):
+ def test_unary_PyUFunc_O_O_method_simple(self, foo=foo):
x = np.full(10, foo(), dtype=object)
assert_(np.all(np.conjugate(x) == True))
@@ -140,8 +143,41 @@ class TestUfuncGenericLoops(object):
x = np.full((10, 2, 3), foo(), dtype=object)
assert_(np.all(np.logical_xor(x, x)))
+ def test_python_complex_conjugate(self):
+ # The conjugate ufunc should fall back to calling the method:
+ arr = np.array([1+2j, 3-4j], dtype="O")
+ assert isinstance(arr[0], complex)
+ res = np.conjugate(arr)
+ assert res.dtype == np.dtype("O")
+ assert_array_equal(res, np.array([1-2j, 3+4j], dtype="O"))
+
+ @pytest.mark.parametrize("ufunc", UNARY_OBJECT_UFUNCS)
+ def test_unary_PyUFunc_O_O_method_full(self, ufunc):
+ """Compare the result of the object loop with non-object one"""
+ val = np.float64(np.pi/4)
+
+ class MyFloat(np.float64):
+ def __getattr__(self, attr):
+ try:
+ return super().__getattr__(attr)
+ except AttributeError:
+ return lambda: getattr(np.core.umath, attr)(val)
+
+ num_arr = np.array([val], dtype=np.float64)
+ obj_arr = np.array([MyFloat(val)], dtype="O")
+
+ with np.errstate(all="raise"):
+ try:
+ res_num = ufunc(num_arr)
+ except Exception as exc:
+ with assert_raises(type(exc)):
+ ufunc(obj_arr)
+ else:
+ res_obj = ufunc(obj_arr)
+ assert_array_equal(res_num.astype("O"), res_obj)
+
-class TestUfunc(object):
+class TestUfunc:
def test_pickle(self):
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
assert_(pickle.loads(pickle.dumps(np.sin,
@@ -980,7 +1016,7 @@ class TestUfunc(object):
assert_array_equal(out, mm_row_col_vec.squeeze())
def test_matrix_multiply(self):
- self.compare_matrix_multiply_results(np.long)
+ self.compare_matrix_multiply_results(np.int64)
self.compare_matrix_multiply_results(np.double)
def test_matrix_multiply_umath_empty(self):
@@ -1085,14 +1121,13 @@ class TestUfunc(object):
assert_equal(np.logical_and.reduce(a), None)
def test_object_comparison(self):
- class HasComparisons(object):
+ class HasComparisons:
def __eq__(self, other):
return '=='
arr0d = np.array(HasComparisons())
assert_equal(arr0d == arr0d, True)
assert_equal(np.equal(arr0d, arr0d), True) # normal behavior is a cast
- assert_equal(np.equal(arr0d, arr0d, dtype=object), '==')
arr1d = np.array([HasComparisons()])
assert_equal(arr1d == arr1d, np.array([True]))
@@ -1125,14 +1160,18 @@ class TestUfunc(object):
# Twice reproduced also for tuples:
np.add.accumulate(arr, out=arr)
np.add.accumulate(arr, out=arr)
- assert_array_equal(arr, np.array([[1]*i for i in [1, 3, 6, 10]]))
+ assert_array_equal(arr,
+ np.array([[1]*i for i in [1, 3, 6, 10]], dtype=object),
+ )
# And the same if the axis argument is used
arr = np.ones((2, 4), dtype=object)
arr[0, :] = [[2] for i in range(4)]
np.add.accumulate(arr, out=arr, axis=-1)
np.add.accumulate(arr, out=arr, axis=-1)
- assert_array_equal(arr[0, :], np.array([[2]*i for i in [1, 3, 6, 10]]))
+ assert_array_equal(arr[0, :],
+ np.array([[2]*i for i in [1, 3, 6, 10]], dtype=object),
+ )
def test_object_array_reduceat_inplace(self):
# Checks that in-place reduceats work, see also gh-7465
@@ -1555,7 +1594,7 @@ class TestUfunc(object):
def test_custom_array_like(self):
- class MyThing(object):
+ class MyThing:
__array_priority__ = 1000
rmul_count = 0
@@ -1776,7 +1815,7 @@ class TestUfunc(object):
assert_raises(TypeError, f, d, 0, keepdims="invalid", dtype="invalid",
out=None)
- # invalid keyord
+ # invalid keyword
assert_raises(TypeError, f, d, axis=0, dtype=None, invalid=0)
assert_raises(TypeError, f, d, invalid=0)
assert_raises(TypeError, f, d, 0, keepdims=True, invalid="invalid",
@@ -1851,6 +1890,23 @@ class TestUfunc(object):
assert_equal(y_base[1,:], y_base_copy[1,:])
assert_equal(y_base[3,:], y_base_copy[3,:])
+ @pytest.mark.parametrize('output_shape',
+ [(), (1,), (1, 1), (1, 3), (4, 3)])
+ @pytest.mark.parametrize('f_reduce', [np.add.reduce, np.minimum.reduce])
+ def test_reduce_wrong_dimension_output(self, f_reduce, output_shape):
+ # Test that we're not incorrectly broadcasting dimensions.
+ # See gh-15144 (failed for np.add.reduce previously).
+ a = np.arange(12.).reshape(4, 3)
+ out = np.empty(output_shape, a.dtype)
+ assert_raises(ValueError, f_reduce, a, axis=0, out=out)
+ if output_shape != (1, 3):
+ assert_raises(ValueError, f_reduce, a, axis=0, out=out,
+ keepdims=True)
+ else:
+ check = f_reduce(a, axis=0, out=out, keepdims=True)
+ assert_(check is out)
+ assert_array_equal(check, f_reduce(a, axis=0, keepdims=True))
+
def test_no_doc_string(self):
# gh-9337
assert_('\n' not in umt.inner1d_no_doc.__doc__)
@@ -1947,3 +2003,21 @@ def test_ufunc_noncontiguous(ufunc):
assert_allclose(res_c, res_n, atol=tol, rtol=tol)
else:
assert_equal(c_ar, n_ar)
+
+
+@pytest.mark.parametrize('ufunc', [np.sign, np.equal])
+def test_ufunc_warn_with_nan(ufunc):
+ # issue gh-15127
+ # test that calling certain ufuncs with a non-standard `nan` value does not
+ # emit a warning
+ # `b` holds a 64 bit signaling nan: the most significant bit of the
+ # significand is zero.
+ b = np.array([0x7ff0000000000001], 'i8').view('f8')
+ assert np.isnan(b)
+ if ufunc.nin == 1:
+ ufunc(b)
+ elif ufunc.nin == 2:
+ ufunc(b, b.copy())
+ else:
+ raise ValueError('ufunc with more than 2 inputs')
+
diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py
index 9b4ce9e47..233a0b1d6 100644
--- a/numpy/core/tests/test_umath.py
+++ b/numpy/core/tests/test_umath.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import platform
import warnings
import fnmatch
@@ -23,7 +21,7 @@ def on_powerpc():
platform.machine().startswith('ppc')
-class _FilterInvalids(object):
+class _FilterInvalids:
def setup(self):
self.olderr = np.seterr(invalid='ignore')
@@ -31,7 +29,7 @@ class _FilterInvalids(object):
np.seterr(**self.olderr)
-class TestConstants(object):
+class TestConstants:
def test_pi(self):
assert_allclose(ncu.pi, 3.141592653589793, 1e-15)
@@ -42,7 +40,7 @@ class TestConstants(object):
assert_allclose(ncu.euler_gamma, 0.5772156649015329, 1e-15)
-class TestOut(object):
+class TestOut:
def test_out_subok(self):
for subok in (True, False):
a = np.array(0.5)
@@ -168,15 +166,15 @@ class TestOut(object):
r1, r2 = np.frexp(d, out=o1, subok=subok)
-class TestComparisons(object):
+class TestComparisons:
def test_ignore_object_identity_in_equal(self):
- # Check error raised when comparing identical objects whose comparison
+ # Check comparing identical objects whose comparison
# is not a simple boolean, e.g., arrays that are compared elementwise.
a = np.array([np.array([1, 2, 3]), None], dtype=object)
assert_raises(ValueError, np.equal, a, a)
# Check error raised when comparing identical non-comparable objects.
- class FunkyType(object):
+ class FunkyType:
def __eq__(self, other):
raise TypeError("I won't compare")
@@ -188,13 +186,13 @@ class TestComparisons(object):
assert_equal(np.equal(a, a), [False])
def test_ignore_object_identity_in_not_equal(self):
- # Check error raised when comparing identical objects whose comparison
+ # Check comparing identical objects whose comparison
# is not a simple boolean, e.g., arrays that are compared elementwise.
a = np.array([np.array([1, 2, 3]), None], dtype=object)
assert_raises(ValueError, np.not_equal, a, a)
# Check error raised when comparing identical non-comparable objects.
- class FunkyType(object):
+ class FunkyType:
def __ne__(self, other):
raise TypeError("I won't compare")
@@ -206,7 +204,7 @@ class TestComparisons(object):
assert_equal(np.not_equal(a, a), [True])
-class TestAdd(object):
+class TestAdd:
def test_reduce_alignment(self):
# gh-9876
# make sure arrays with weird strides work with the optimizations in
@@ -217,7 +215,7 @@ class TestAdd(object):
assert_equal(a['b'].sum(), 0)
-class TestDivision(object):
+class TestDivision:
def test_division_int(self):
# int division should follow Python
x = np.array([5, 10, 90, 100, -5, -10, -90, -100, -120])
@@ -284,7 +282,7 @@ def _signs(dt):
return (+1, -1)
-class TestRemainder(object):
+class TestRemainder:
def test_remainder_basic(self):
dt = np.typecodes['AllInteger'] + np.typecodes['Float']
@@ -375,7 +373,7 @@ class TestRemainder(object):
assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
-class TestCbrt(object):
+class TestCbrt:
def test_cbrt_scalar(self):
assert_almost_equal((np.cbrt(np.float32(-2.5)**3)), -2.5)
@@ -388,7 +386,7 @@ class TestCbrt(object):
assert_equal(np.cbrt(-np.inf), -np.inf)
-class TestPower(object):
+class TestPower:
def test_power_float(self):
x = np.array([1., 2., 3.])
assert_equal(x**0, [1., 1., 1.])
@@ -527,7 +525,7 @@ class TestPower(object):
assert_raises(ValueError, np.power, one, minusone)
-class TestFloat_power(object):
+class TestFloat_power:
def test_type_conversion(self):
arg_type = '?bhilBHILefdgFDG'
res_type = 'ddddddddddddgDDG'
@@ -538,7 +536,7 @@ class TestFloat_power(object):
assert_(res.dtype.name == np.dtype(dtout).name, msg)
-class TestLog2(object):
+class TestLog2:
def test_log2_values(self):
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
@@ -569,7 +567,7 @@ class TestLog2(object):
assert_(w[2].category is RuntimeWarning)
-class TestExp2(object):
+class TestExp2:
def test_exp2_values(self):
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
@@ -621,7 +619,7 @@ class TestLogAddExp2(_FilterInvalids):
assert_(np.isnan(np.logaddexp2(np.nan, np.nan)))
-class TestLog(object):
+class TestLog:
def test_log_values(self):
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
@@ -632,7 +630,7 @@ class TestLog(object):
assert_almost_equal(np.log(xf), yf)
-class TestExp(object):
+class TestExp:
def test_exp_values(self):
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
@@ -642,7 +640,7 @@ class TestExp(object):
yf = np.array(y, dtype=dt)*log2_
assert_almost_equal(np.exp(yf), xf)
-class TestSpecialFloats(object):
+class TestSpecialFloats:
def test_exp_values(self):
x = [np.nan, np.nan, np.inf, 0.]
y = [np.nan, -np.nan, np.inf, -np.inf]
@@ -744,7 +742,7 @@ avx_ufuncs = {'sqrt' :[1, 0., 100.],
'ceil' :[0, -100., 100.],
'trunc' :[0, -100., 100.]}
-class TestAVXUfuncs(object):
+class TestAVXUfuncs:
def test_avx_based_ufunc(self):
strides = np.array([-4,-3,-2,-1,1,2,3,4])
np.random.seed(42)
@@ -776,7 +774,7 @@ class TestAVXUfuncs(object):
assert_equal(myfunc(x_f64[::jj]), y_true64[::jj])
assert_equal(myfunc(x_f32[::jj]), y_true32[::jj])
-class TestAVXFloat32Transcendental(object):
+class TestAVXFloat32Transcendental:
def test_exp_float32(self):
np.random.seed(42)
x_f32 = np.float32(np.random.uniform(low=0.0,high=88.1,size=1000000))
@@ -792,7 +790,7 @@ class TestAVXFloat32Transcendental(object):
def test_sincos_float32(self):
np.random.seed(42)
N = 1000000
- M = np.int(N/20)
+ M = np.int_(N/20)
index = np.random.randint(low=0, high=N, size=M)
x_f32 = np.float32(np.random.uniform(low=-100.,high=100.,size=N))
# test coverage for elements > 117435.992f for which glibc is used
@@ -862,7 +860,7 @@ class TestLogAddExp(_FilterInvalids):
assert_equal(np.logaddexp.reduce([]), -np.inf)
-class TestLog1p(object):
+class TestLog1p:
def test_log1p(self):
assert_almost_equal(ncu.log1p(0.2), ncu.log(1.2))
assert_almost_equal(ncu.log1p(1e-6), ncu.log(1+1e-6))
@@ -876,7 +874,7 @@ class TestLog1p(object):
assert_equal(ncu.log1p(-np.inf), np.nan)
-class TestExpm1(object):
+class TestExpm1:
def test_expm1(self):
assert_almost_equal(ncu.expm1(0.2), ncu.exp(0.2)-1)
assert_almost_equal(ncu.expm1(1e-6), ncu.exp(1e-6)-1)
@@ -888,8 +886,14 @@ class TestExpm1(object):
assert_equal(ncu.expm1(np.inf), np.inf)
assert_equal(ncu.expm1(-np.inf), -1.)
+ def test_complex(self):
+ x = np.asarray(1e-12)
+ assert_allclose(x, ncu.expm1(x))
+ x = x.astype(np.complex128)
+ assert_allclose(x, ncu.expm1(x))
+
-class TestHypot(object):
+class TestHypot:
def test_simple(self):
assert_almost_equal(ncu.hypot(1, 1), ncu.sqrt(2))
assert_almost_equal(ncu.hypot(0, 0), 0)
@@ -913,7 +917,7 @@ def assert_hypot_isinf(x, y):
"hypot(%s, %s) is %s, not inf" % (x, y, ncu.hypot(x, y)))
-class TestHypotSpecialValues(object):
+class TestHypotSpecialValues:
def test_nan_outputs(self):
assert_hypot_isnan(np.nan, np.nan)
assert_hypot_isnan(np.nan, 1)
@@ -950,7 +954,7 @@ def assert_arctan2_isnzero(x, y):
assert_((ncu.arctan2(x, y) == 0 and np.signbit(ncu.arctan2(x, y))), "arctan(%s, %s) is %s, not -0" % (x, y, ncu.arctan2(x, y)))
-class TestArctan2SpecialValues(object):
+class TestArctan2SpecialValues:
def test_one_one(self):
# atan2(1, 1) returns pi/4.
assert_almost_equal(ncu.arctan2(1, 1), 0.25 * np.pi)
@@ -1019,7 +1023,7 @@ class TestArctan2SpecialValues(object):
assert_arctan2_isnan(np.nan, np.nan)
-class TestLdexp(object):
+class TestLdexp:
def _check_ldexp(self, tp):
assert_almost_equal(ncu.ldexp(np.array(2., np.float32),
np.array(3, tp)), 16.)
@@ -1104,6 +1108,19 @@ class TestMaximum(_FilterInvalids):
arg2 = arg1 + 1
assert_equal(np.maximum(arg1, arg2), arg2)
+ def test_strided_array(self):
+ arr1 = np.array([-4.0, 1.0, 10.0, 0.0, np.nan, -np.nan, np.inf, -np.inf])
+ arr2 = np.array([-2.0,-1.0, np.nan, 1.0, 0.0, np.nan, 1.0, -3.0])
+ maxtrue = np.array([-2.0, 1.0, np.nan, 1.0, np.nan, np.nan, np.inf, -3.0])
+ out = np.ones(8)
+ out_maxtrue = np.array([-2.0, 1.0, 1.0, 10.0, 1.0, 1.0, np.nan, 1.0])
+ assert_equal(np.maximum(arr1,arr2), maxtrue)
+ assert_equal(np.maximum(arr1[::2],arr2[::2]), maxtrue[::2])
+ assert_equal(np.maximum(arr1[:4:], arr2[::2]), np.array([-2.0, np.nan, 10.0, 1.0]))
+ assert_equal(np.maximum(arr1[::3], arr2[:3:]), np.array([-2.0, 0.0, np.nan]))
+ assert_equal(np.maximum(arr1[:6:2], arr2[::3], out=out[::3]), np.array([-2.0, 10., np.nan]))
+ assert_equal(out, out_maxtrue)
+
class TestMinimum(_FilterInvalids):
def test_reduce(self):
@@ -1162,6 +1179,18 @@ class TestMinimum(_FilterInvalids):
arg2 = arg1 + 1
assert_equal(np.minimum(arg1, arg2), arg1)
+ def test_strided_array(self):
+ arr1 = np.array([-4.0, 1.0, 10.0, 0.0, np.nan, -np.nan, np.inf, -np.inf])
+ arr2 = np.array([-2.0,-1.0, np.nan, 1.0, 0.0, np.nan, 1.0, -3.0])
+ mintrue = np.array([-4.0, -1.0, np.nan, 0.0, np.nan, np.nan, 1.0, -np.inf])
+ out = np.ones(8)
+ out_mintrue = np.array([-4.0, 1.0, 1.0, 1.0, 1.0, 1.0, np.nan, 1.0])
+ assert_equal(np.minimum(arr1,arr2), mintrue)
+ assert_equal(np.minimum(arr1[::2],arr2[::2]), mintrue[::2])
+ assert_equal(np.minimum(arr1[:4:], arr2[::2]), np.array([-4.0, np.nan, 0.0, 0.0]))
+ assert_equal(np.minimum(arr1[::3], arr2[:3:]), np.array([-4.0, -1.0, np.nan]))
+ assert_equal(np.minimum(arr1[:6:2], arr2[::3], out=out[::3]), np.array([-4.0, 1.0, np.nan]))
+ assert_equal(out, out_mintrue)
class TestFmax(_FilterInvalids):
def test_reduce(self):
@@ -1247,7 +1276,7 @@ class TestFmin(_FilterInvalids):
assert_equal(np.fmin(arg1, arg2), out)
-class TestBool(object):
+class TestBool:
def test_exceptions(self):
a = np.ones(1, dtype=np.bool_)
assert_raises(TypeError, np.negative, a)
@@ -1310,7 +1339,7 @@ class TestBool(object):
assert_equal(np.logical_xor.reduce(arr), arr.sum() % 2 == 1)
-class TestBitwiseUFuncs(object):
+class TestBitwiseUFuncs:
bitwise_types = [np.dtype(c) for c in '?' + 'bBhHiIlLqQ' + 'O']
@@ -1394,7 +1423,7 @@ class TestBitwiseUFuncs(object):
assert_(type(f.reduce(btype)) is bool, msg)
-class TestInt(object):
+class TestInt:
def test_logical_not(self):
x = np.ones(10, dtype=np.int16)
o = np.ones(10 * 2, dtype=bool)
@@ -1405,24 +1434,24 @@ class TestInt(object):
assert_array_equal(o, tgt)
-class TestFloatingPoint(object):
+class TestFloatingPoint:
def test_floating_point(self):
assert_equal(ncu.FLOATING_POINT_SUPPORT, 1)
-class TestDegrees(object):
+class TestDegrees:
def test_degrees(self):
assert_almost_equal(ncu.degrees(np.pi), 180.0)
assert_almost_equal(ncu.degrees(-0.5*np.pi), -90.0)
-class TestRadians(object):
+class TestRadians:
def test_radians(self):
assert_almost_equal(ncu.radians(180.0), np.pi)
assert_almost_equal(ncu.radians(-90.0), -0.5*np.pi)
-class TestHeavside(object):
+class TestHeavside:
def test_heaviside(self):
x = np.array([[-30.0, -0.1, 0.0, 0.2], [7.5, np.nan, np.inf, -np.inf]])
expectedhalf = np.array([[0.0, 0.0, 0.5, 1.0], [1.0, np.nan, 1.0, 0.0]])
@@ -1444,7 +1473,7 @@ class TestHeavside(object):
assert_equal(h, expected1.astype(np.float32))
-class TestSign(object):
+class TestSign:
def test_sign(self):
a = np.array([np.inf, -np.inf, np.nan, 0.0, 3.0, -3.0])
out = np.zeros(a.shape)
@@ -1475,7 +1504,7 @@ class TestSign(object):
assert_raises(TypeError, test_nan)
-class TestMinMax(object):
+class TestMinMax:
def test_minmax_blocked(self):
# simd tests on max/min, test all alignments, slow but important
# for 2 * vz + 2 * (vs - 1) + 1 (unrolled once)
@@ -1518,7 +1547,7 @@ class TestMinMax(object):
assert_equal(a, np.nan)
-class TestAbsoluteNegative(object):
+class TestAbsoluteNegative:
def test_abs_neg_blocked(self):
# simd tests on abs, test all alignments for vz + 2 * (vs - 1) + 1
for dt, sz in [(np.float32, 11), (np.float64, 5)]:
@@ -1560,7 +1589,7 @@ class TestAbsoluteNegative(object):
np.abs(np.ones_like(d), out=d)
-class TestPositive(object):
+class TestPositive:
def test_valid(self):
valid_dtypes = [int, float, complex, object]
for dtype in valid_dtypes:
@@ -1579,10 +1608,10 @@ class TestPositive(object):
np.positive(np.array(['bar'], dtype=object))
-class TestSpecialMethods(object):
+class TestSpecialMethods:
def test_wrap(self):
- class with_wrap(object):
+ class with_wrap:
def __array__(self):
return np.zeros(1)
@@ -1620,7 +1649,7 @@ class TestSpecialMethods(object):
@property
def args(self):
# We need to ensure these are fetched at the same time, before
- # any other ufuncs are calld by the assertions
+ # any other ufuncs are called by the assertions
return (self._prepare_args, self._wrap_args)
def __repr__(self):
return "a" # for short test output
@@ -1686,7 +1715,7 @@ class TestSpecialMethods(object):
def test_old_wrap(self):
- class with_wrap(object):
+ class with_wrap:
def __array__(self):
return np.zeros(1)
@@ -1701,7 +1730,7 @@ class TestSpecialMethods(object):
def test_priority(self):
- class A(object):
+ class A:
def __array__(self):
return np.zeros(1)
@@ -1744,7 +1773,7 @@ class TestSpecialMethods(object):
def test_failing_wrap(self):
- class A(object):
+ class A:
def __array__(self):
return np.zeros(2)
@@ -1776,7 +1805,7 @@ class TestSpecialMethods(object):
def test_none_wrap(self):
# Tests that issue #8507 is resolved. Previously, this would segfault
- class A(object):
+ class A:
def __array__(self):
return np.zeros(1)
@@ -1788,7 +1817,7 @@ class TestSpecialMethods(object):
def test_default_prepare(self):
- class with_wrap(object):
+ class with_wrap:
__array_priority__ = 10
def __array__(self):
@@ -1834,7 +1863,7 @@ class TestSpecialMethods(object):
def test_failing_prepare(self):
- class A(object):
+ class A:
def __array__(self):
return np.zeros(1)
@@ -1844,36 +1873,18 @@ class TestSpecialMethods(object):
a = A()
assert_raises(RuntimeError, ncu.maximum, a, a)
- def test_array_with_context(self):
+ def test_array_too_many_args(self):
class A(object):
- def __array__(self, dtype=None, context=None):
- func, args, i = context
- self.func = func
- self.args = args
- self.i = i
- return np.zeros(1)
-
- class B(object):
- def __array__(self, dtype=None):
- return np.zeros(1, dtype)
-
- class C(object):
- def __array__(self):
+ def __array__(self, dtype, context):
return np.zeros(1)
a = A()
- ncu.maximum(np.zeros(1), a)
- assert_(a.func is ncu.maximum)
- assert_equal(a.args[0], 0)
- assert_(a.args[1] is a)
- assert_(a.i == 1)
- assert_equal(ncu.maximum(a, B()), 0)
- assert_equal(ncu.maximum(a, C()), 0)
+ assert_raises_regex(TypeError, '2 required positional', np.sum, a)
def test_ufunc_override(self):
# check override works even with instance with high priority.
- class A(object):
+ class A:
def __array_ufunc__(self, func, method, *inputs, **kwargs):
return self, func, method, inputs, kwargs
@@ -1910,7 +1921,7 @@ class TestSpecialMethods(object):
three_mul_ufunc = np.frompyfunc(tres_mul, 3, 1)
four_mul_ufunc = np.frompyfunc(quatro_mul, 4, 1)
- class A(object):
+ class A:
def __array_ufunc__(self, func, method, *inputs, **kwargs):
return "A"
@@ -1918,11 +1929,11 @@ class TestSpecialMethods(object):
def __array_ufunc__(self, func, method, *inputs, **kwargs):
return "ASub"
- class B(object):
+ class B:
def __array_ufunc__(self, func, method, *inputs, **kwargs):
return "B"
- class C(object):
+ class C:
def __init__(self):
self.count = 0
@@ -2034,7 +2045,7 @@ class TestSpecialMethods(object):
def test_ufunc_override_methods(self):
- class A(object):
+ class A:
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
return self, ufunc, method, inputs, kwargs
@@ -2203,11 +2214,11 @@ class TestSpecialMethods(object):
def test_ufunc_override_out(self):
- class A(object):
+ class A:
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
return kwargs
- class B(object):
+ class B:
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
return kwargs
@@ -2259,7 +2270,7 @@ class TestSpecialMethods(object):
def test_ufunc_override_exception(self):
- class A(object):
+ class A:
def __array_ufunc__(self, *a, **kwargs):
raise ValueError("oops")
@@ -2270,7 +2281,7 @@ class TestSpecialMethods(object):
def test_ufunc_override_not_implemented(self):
- class A(object):
+ class A:
def __array_ufunc__(self, *args, **kwargs):
return NotImplemented
@@ -2287,7 +2298,7 @@ class TestSpecialMethods(object):
def test_ufunc_override_disabled(self):
- class OptOut(object):
+ class OptOut:
__array_ufunc__ = None
opt_out = OptOut()
@@ -2304,7 +2315,7 @@ class TestSpecialMethods(object):
# opt-outs still hold even when other arguments have pathological
# __array_ufunc__ implementations
- class GreedyArray(object):
+ class GreedyArray:
def __array_ufunc__(self, *args, **kwargs):
return self
@@ -2318,7 +2329,7 @@ class TestSpecialMethods(object):
def test_gufunc_override(self):
# gufunc are just ufunc instances, but follow a different path,
# so check __array_ufunc__ overrides them properly.
- class A(object):
+ class A:
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
return self, ufunc, method, inputs, kwargs
@@ -2349,7 +2360,7 @@ class TestSpecialMethods(object):
# NOTE: this class is given as an example in doc/subclassing.py;
# if you make any changes here, do update it there too.
class A(np.ndarray):
- def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+ def __array_ufunc__(self, ufunc, method, *inputs, out=None, **kwargs):
args = []
in_no = []
for i, input_ in enumerate(inputs):
@@ -2359,7 +2370,7 @@ class TestSpecialMethods(object):
else:
args.append(input_)
- outputs = kwargs.pop('out', None)
+ outputs = out
out_no = []
if outputs:
out_args = []
@@ -2400,7 +2411,7 @@ class TestSpecialMethods(object):
return results[0] if len(results) == 1 else results
- class B(object):
+ class B:
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
if any(isinstance(input_, A) for input_ in inputs):
return "A!"
@@ -2519,14 +2530,14 @@ class TestSpecialMethods(object):
assert_(a.info, {'inputs': [0, 2]})
-class TestChoose(object):
+class TestChoose:
def test_mixed(self):
c = np.array([True, True])
a = np.array([True, True])
assert_equal(np.choose(c, (a, 1)), np.array([1, 1]))
-class TestRationalFunctions(object):
+class TestRationalFunctions:
def test_lcm(self):
self._test_lcm_inner(np.int16)
self._test_lcm_inner(np.uint16)
@@ -2625,7 +2636,7 @@ class TestRationalFunctions(object):
assert_equal(np.gcd(2**100, 3**100), 1)
-class TestRoundingFunctions(object):
+class TestRoundingFunctions:
def test_object_direct(self):
""" test direct implementation of these magic methods """
@@ -2661,7 +2672,7 @@ class TestRoundingFunctions(object):
assert_equal(np.trunc(f), -1)
-class TestComplexFunctions(object):
+class TestComplexFunctions:
funcs = [np.arcsin, np.arccos, np.arctan, np.arcsinh, np.arccosh,
np.arctanh, np.sin, np.cos, np.tan, np.exp,
np.exp2, np.log, np.sqrt, np.log10, np.log2,
@@ -2844,7 +2855,7 @@ class TestComplexFunctions(object):
check(func, pts, 1+1j)
-class TestAttributes(object):
+class TestAttributes:
def test_attributes(self):
add = ncu.add
assert_equal(add.__name__, 'add')
@@ -2863,7 +2874,7 @@ class TestAttributes(object):
"frexp(x[, out1, out2], / [, out=(None, None)], *, where=True"))
-class TestSubclass(object):
+class TestSubclass:
def test_subclass_op(self):
@@ -2876,6 +2887,32 @@ class TestSubclass(object):
a = simple((3, 4))
assert_equal(a+a, a)
+
+class TestFrompyfunc(object):
+
+ def test_identity(self):
+ def mul(a, b):
+ return a * b
+
+ # with identity=value
+ mul_ufunc = np.frompyfunc(mul, nin=2, nout=1, identity=1)
+ assert_equal(mul_ufunc.reduce([2, 3, 4]), 24)
+ assert_equal(mul_ufunc.reduce(np.ones((2, 2)), axis=(0, 1)), 1)
+ assert_equal(mul_ufunc.reduce([]), 1)
+
+ # with identity=None (reorderable)
+ mul_ufunc = np.frompyfunc(mul, nin=2, nout=1, identity=None)
+ assert_equal(mul_ufunc.reduce([2, 3, 4]), 24)
+ assert_equal(mul_ufunc.reduce(np.ones((2, 2)), axis=(0, 1)), 1)
+ assert_raises(ValueError, lambda: mul_ufunc.reduce([]))
+
+ # with no identity (not reorderable)
+ mul_ufunc = np.frompyfunc(mul, nin=2, nout=1)
+ assert_equal(mul_ufunc.reduce([2, 3, 4]), 24)
+ assert_raises(ValueError, lambda: mul_ufunc.reduce(np.ones((2, 2)), axis=(0, 1)))
+ assert_raises(ValueError, lambda: mul_ufunc.reduce([]))
+
+
def _check_branch_cut(f, x0, dx, re_sign=1, im_sign=-1, sig_zero_ok=False,
dtype=complex):
"""
@@ -3120,6 +3157,14 @@ def test_rint_big_int():
# Rint should not change the value
assert_equal(val, np.rint(val))
+@pytest.mark.parametrize('ftype', [np.float32, np.float64])
+def test_memoverlap_accumulate(ftype):
+ # Reproduces bug https://github.com/numpy/numpy/issues/15597
+ arr = np.array([0.61, 0.60, 0.77, 0.41, 0.19], dtype=ftype)
+ out_max = np.array([0.61, 0.61, 0.77, 0.77, 0.77], dtype=ftype)
+ out_min = np.array([0.61, 0.60, 0.60, 0.41, 0.19], dtype=ftype)
+ assert_equal(np.maximum.accumulate(arr), out_max)
+ assert_equal(np.minimum.accumulate(arr), out_min)
def test_signaling_nan_exceptions():
with assert_no_warnings():
diff --git a/numpy/core/tests/test_umath_accuracy.py b/numpy/core/tests/test_umath_accuracy.py
index 0bab04df2..fd7214396 100644
--- a/numpy/core/tests/test_umath_accuracy.py
+++ b/numpy/core/tests/test_umath_accuracy.py
@@ -3,16 +3,16 @@ import platform
from os import path
import sys
import pytest
-from ctypes import *
+from ctypes import c_float, c_int, cast, pointer, POINTER
from numpy.testing import assert_array_max_ulp
+from numpy.core._multiarray_umath import __cpu_features__
-runtest = sys.platform.startswith('linux') and (platform.machine() == 'x86_64')
+IS_AVX = __cpu_features__.get('AVX512F', False) or \
+ (__cpu_features__.get('FMA3', False) and __cpu_features__.get('AVX2', False))
+runtest = sys.platform.startswith('linux') and IS_AVX
platform_skip = pytest.mark.skipif(not runtest,
- reason="""
- stick to x86_64 and linux platforms.
- test seems to fail on some of ARM and power
- architectures.
- """)
+ reason="avoid testing inconsistent platform "
+ "library implementations")
# convert string to hex function taken from:
# https://stackoverflow.com/questions/1592158/convert-hex-to-float #
@@ -28,8 +28,8 @@ files = ['umath-validation-set-exp',
'umath-validation-set-sin',
'umath-validation-set-cos']
-class TestAccuracy(object):
- @pytest.mark.xfail(reason="Fails for MacPython/numpy-wheels builds")
+class TestAccuracy:
+ @platform_skip
def test_validate_transcendentals(self):
with np.errstate(all='ignore'):
for filename in files:
@@ -37,18 +37,24 @@ class TestAccuracy(object):
filepath = path.join(data_dir, filename)
with open(filepath) as fid:
file_without_comments = (r for r in fid if not r[0] in ('$', '#'))
- data = np.genfromtxt(file_without_comments,
- dtype=('|S39','|S39','|S39',np.int),
- names=('type','input','output','ulperr'),
- delimiter=',',
- skip_header=1)
- npfunc = getattr(np, filename.split('-')[3])
- for datatype in np.unique(data['type']):
- data_subset = data[data['type'] == datatype]
- inval = np.array(str_to_float(data_subset['input'].astype(str)), dtype=eval(datatype))
- outval = np.array(str_to_float(data_subset['output'].astype(str)), dtype=eval(datatype))
- perm = np.random.permutation(len(inval))
- inval = inval[perm]
- outval = outval[perm]
- maxulperr = data_subset['ulperr'].max()
- assert_array_max_ulp(npfunc(inval), outval, maxulperr)
+ data = np.genfromtxt(file_without_comments,
+ dtype=('|S39','|S39','|S39',int),
+ names=('type','input','output','ulperr'),
+ delimiter=',',
+ skip_header=1)
+ npfunc = getattr(np, filename.split('-')[3])
+ for datatype in np.unique(data['type']):
+ data_subset = data[data['type'] == datatype]
+ inval = np.array(str_to_float(data_subset['input'].astype(str)), dtype=eval(datatype))
+ outval = np.array(str_to_float(data_subset['output'].astype(str)), dtype=eval(datatype))
+ perm = np.random.permutation(len(inval))
+ inval = inval[perm]
+ outval = outval[perm]
+ maxulperr = data_subset['ulperr'].max()
+ assert_array_max_ulp(npfunc(inval), outval, maxulperr)
+
+ def test_ignore_nan_ulperror(self):
+ # Ignore ULP differences between various NAN's
+ nan1_f32 = np.array(str_to_float('0xffffffff'), dtype=np.float32)
+ nan2_f32 = np.array(str_to_float('0x7fddbfbf'), dtype=np.float32)
+ assert_array_max_ulp(nan1_f32, nan2_f32, 0)
diff --git a/numpy/core/tests/test_umath_complex.py b/numpy/core/tests/test_umath_complex.py
index 1f5b4077f..a21158420 100644
--- a/numpy/core/tests/test_umath_complex.py
+++ b/numpy/core/tests/test_umath_complex.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import sys
import platform
import pytest
@@ -8,7 +6,7 @@ import numpy as np
# import the c-extension module directly since _arg is not exported via umath
import numpy.core._multiarray_umath as ncu
from numpy.testing import (
- assert_raises, assert_equal, assert_array_equal, assert_almost_equal
+ assert_raises, assert_equal, assert_array_equal, assert_almost_equal, assert_array_max_ulp
)
# TODO: branch cuts (use Pauli code)
@@ -31,7 +29,7 @@ platform_skip = pytest.mark.skipif(xfail_complex_tests,
-class TestCexp(object):
+class TestCexp:
def test_simple(self):
check = check_complex_value
f = np.exp
@@ -131,7 +129,7 @@ class TestCexp(object):
check(f, np.nan, 0, np.nan, 0)
-class TestClog(object):
+class TestClog:
def test_simple(self):
x = np.array([1+0j, 1+2j])
y_r = np.log(np.abs(x)) + 1j * np.angle(x)
@@ -276,7 +274,7 @@ class TestClog(object):
assert_almost_equal(np.log(xa[i].conj()), ya[i].conj())
-class TestCsqrt(object):
+class TestCsqrt:
def test_simple(self):
# sqrt(1)
@@ -356,7 +354,7 @@ class TestCsqrt(object):
# XXX: check for conj(csqrt(z)) == csqrt(conj(z)) (need to fix branch
# cuts first)
-class TestCpow(object):
+class TestCpow:
def setup(self):
self.olderr = np.seterr(invalid='ignore')
@@ -396,7 +394,7 @@ class TestCpow(object):
for i in lx:
assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i)
-class TestCabs(object):
+class TestCabs:
def setup(self):
self.olderr = np.seterr(invalid='ignore')
@@ -458,7 +456,7 @@ class TestCabs(object):
ref = g(x[i], y[i])
check_real_value(f, x[i], y[i], ref)
-class TestCarg(object):
+class TestCarg:
def test_simple(self):
check_real_value(ncu._arg, 1, 0, 0, False)
check_real_value(ncu._arg, 0, 1, 0.5*np.pi, False)
@@ -542,3 +540,40 @@ def check_complex_value(f, x1, y1, x2, y2, exact=True):
assert_equal(f(z1), z2)
else:
assert_almost_equal(f(z1), z2)
+
+class TestSpecialComplexAVX(object):
+ @pytest.mark.parametrize("stride", [-4,-2,-1,1,2,4])
+ @pytest.mark.parametrize("astype", [np.complex64, np.complex128])
+ def test_array(self, stride, astype):
+ arr = np.array([np.complex(np.nan , np.nan),
+ np.complex(np.nan , np.inf),
+ np.complex(np.inf , np.nan),
+ np.complex(np.inf , np.inf),
+ np.complex(0. , np.inf),
+ np.complex(np.inf , 0.),
+ np.complex(0. , 0.),
+ np.complex(0. , np.nan),
+ np.complex(np.nan , 0.)], dtype=astype)
+ abs_true = np.array([np.nan, np.inf, np.inf, np.inf, np.inf, np.inf, 0., np.nan, np.nan], dtype=arr.real.dtype)
+ sq_true = np.array([np.complex(np.nan, np.nan),
+ np.complex(np.nan, np.nan),
+ np.complex(np.nan, np.nan),
+ np.complex(np.nan, np.inf),
+ np.complex(-np.inf, np.nan),
+ np.complex(np.inf, np.nan),
+ np.complex(0., 0.),
+ np.complex(np.nan, np.nan),
+ np.complex(np.nan, np.nan)], dtype=astype)
+ assert_equal(np.abs(arr[::stride]), abs_true[::stride])
+ with np.errstate(invalid='ignore'):
+ assert_equal(np.square(arr[::stride]), sq_true[::stride])
+
+class TestComplexAbsoluteAVX(object):
+ @pytest.mark.parametrize("arraysize", [1,2,3,4,5,6,7,8,9,10,11,13,15,17,18,19])
+ @pytest.mark.parametrize("stride", [-4,-3,-2,-1,1,2,3,4])
+ @pytest.mark.parametrize("astype", [np.complex64, np.complex128])
+ # test to ensure masking and strides work as intended in the AVX implementation
+ def test_array(self, arraysize, stride, astype):
+ arr = np.ones(arraysize, dtype=astype)
+ abs_true = np.ones(arraysize, dtype=arr.real.dtype)
+ assert_equal(np.abs(arr[::stride]), abs_true[::stride])
diff --git a/numpy/core/tests/test_unicode.py b/numpy/core/tests/test_unicode.py
index 2ffd8801b..8e0dd47cb 100644
--- a/numpy/core/tests/test_unicode.py
+++ b/numpy/core/tests/test_unicode.py
@@ -1,45 +1,24 @@
-from __future__ import division, absolute_import, print_function
-
-import sys
-
import numpy as np
-from numpy.compat import unicode
from numpy.testing import assert_, assert_equal, assert_array_equal
-# Guess the UCS length for this python interpreter
-if sys.version_info[:2] >= (3, 3):
- # Python 3.3 uses a flexible string representation
- ucs4 = False
-
- def buffer_length(arr):
- if isinstance(arr, unicode):
- arr = str(arr)
- if not arr:
- charmax = 0
- else:
- charmax = max([ord(c) for c in arr])
- if charmax < 256:
- size = 1
- elif charmax < 65536:
- size = 2
- else:
- size = 4
- return size * len(arr)
- v = memoryview(arr)
- if v.shape is None:
- return len(v) * v.itemsize
+def buffer_length(arr):
+ if isinstance(arr, str):
+ if not arr:
+ charmax = 0
+ else:
+ charmax = max([ord(c) for c in arr])
+ if charmax < 256:
+ size = 1
+ elif charmax < 65536:
+ size = 2
else:
- return np.prod(v.shape) * v.itemsize
-else:
- if len(buffer(u'u')) == 4:
- ucs4 = True
+ size = 4
+ return size * len(arr)
+ v = memoryview(arr)
+ if v.shape is None:
+ return len(v) * v.itemsize
else:
- ucs4 = False
-
- def buffer_length(arr):
- if isinstance(arr, np.ndarray):
- return len(arr.data)
- return len(buffer(arr))
+ return np.prod(v.shape) * v.itemsize
# In both cases below we need to make sure that the byte swapped value (as
# UCS4) is still a valid unicode:
@@ -54,12 +33,8 @@ def test_string_cast():
uni_arr1 = str_arr.astype('>U')
uni_arr2 = str_arr.astype('<U')
- if sys.version_info[0] < 3:
- assert_array_equal(str_arr, uni_arr1)
- assert_array_equal(str_arr, uni_arr2)
- else:
- assert_(str_arr != uni_arr1)
- assert_(str_arr != uni_arr2)
+ assert_(str_arr != uni_arr1)
+ assert_(str_arr != uni_arr2)
assert_array_equal(uni_arr1, uni_arr2)
@@ -67,7 +42,7 @@ def test_string_cast():
# Creation tests
############################################################
-class CreateZeros(object):
+class CreateZeros:
"""Check the creation of zero-valued arrays"""
def content_check(self, ua, ua_scalar, nbytes):
@@ -81,10 +56,7 @@ class CreateZeros(object):
# Encode to ascii and double check
assert_(ua_scalar.encode('ascii') == b'')
# Check buffer lengths for scalars
- if ucs4:
- assert_(buffer_length(ua_scalar) == 0)
- else:
- assert_(buffer_length(ua_scalar) == 0)
+ assert_(buffer_length(ua_scalar) == 0)
def test_zeros0D(self):
# Check creation of 0-dimensional objects
@@ -119,7 +91,7 @@ class TestCreateZeros_1009(CreateZeros):
ulen = 1009
-class CreateValues(object):
+class CreateValues:
"""Check the creation of unicode arrays with values"""
def content_check(self, ua, ua_scalar, nbytes):
@@ -134,17 +106,14 @@ class CreateValues(object):
assert_(ua_scalar.encode('utf-8') ==
(self.ucs_value*self.ulen).encode('utf-8'))
# Check buffer lengths for scalars
- if ucs4:
- assert_(buffer_length(ua_scalar) == 4*self.ulen)
+ if self.ucs_value == ucs4_value:
+ # In UCS2, the \U0010FFFF will be represented using a
+ # surrogate *pair*
+ assert_(buffer_length(ua_scalar) == 2*2*self.ulen)
else:
- if self.ucs_value == ucs4_value:
- # In UCS2, the \U0010FFFF will be represented using a
- # surrogate *pair*
- assert_(buffer_length(ua_scalar) == 2*2*self.ulen)
- else:
- # In UCS2, the \uFFFF will be represented using a
- # regular 2-byte word
- assert_(buffer_length(ua_scalar) == 2*self.ulen)
+ # In UCS2, the \uFFFF will be represented using a
+ # regular 2-byte word
+ assert_(buffer_length(ua_scalar) == 2*self.ulen)
def test_values0D(self):
# Check creation of 0-dimensional objects with values
@@ -204,7 +173,7 @@ class TestCreateValues_1009_UCS4(CreateValues):
# Assignment tests
############################################################
-class AssignValues(object):
+class AssignValues:
"""Check the assignment of unicode arrays with values"""
def content_check(self, ua, ua_scalar, nbytes):
@@ -219,17 +188,14 @@ class AssignValues(object):
assert_(ua_scalar.encode('utf-8') ==
(self.ucs_value*self.ulen).encode('utf-8'))
# Check buffer lengths for scalars
- if ucs4:
- assert_(buffer_length(ua_scalar) == 4*self.ulen)
+ if self.ucs_value == ucs4_value:
+ # In UCS2, the \U0010FFFF will be represented using a
+ # surrogate *pair*
+ assert_(buffer_length(ua_scalar) == 2*2*self.ulen)
else:
- if self.ucs_value == ucs4_value:
- # In UCS2, the \U0010FFFF will be represented using a
- # surrogate *pair*
- assert_(buffer_length(ua_scalar) == 2*2*self.ulen)
- else:
- # In UCS2, the \uFFFF will be represented using a
- # regular 2-byte word
- assert_(buffer_length(ua_scalar) == 2*self.ulen)
+ # In UCS2, the \uFFFF will be represented using a
+ # regular 2-byte word
+ assert_(buffer_length(ua_scalar) == 2*self.ulen)
def test_values0D(self):
# Check assignment of 0-dimensional objects with values
@@ -294,7 +260,7 @@ class TestAssignValues_1009_UCS4(AssignValues):
# Byteorder tests
############################################################
-class ByteorderValues(object):
+class ByteorderValues:
"""Check the byteorder of unicode arrays in round-trip conversions"""
def test_values0D(self):
diff --git a/numpy/core/umath.py b/numpy/core/umath.py
index f3b26ab72..6a5474ffe 100644
--- a/numpy/core/umath.py
+++ b/numpy/core/umath.py
@@ -7,10 +7,11 @@ by importing from the extension module.
"""
from . import _multiarray_umath
-from numpy.core._multiarray_umath import *
-from numpy.core._multiarray_umath import (
- _UFUNC_API, _add_newdoc_ufunc, _ones_like
- )
+from ._multiarray_umath import * # noqa: F403
+# These imports are needed for backward compatibility,
+# do not change them. issue gh-11862
+# _ones_like is semi-public, on purpose not added to __all__
+from ._multiarray_umath import _UFUNC_API, _add_newdoc_ufunc, _ones_like
__all__ = [
'_UFUNC_API', 'ERR_CALL', 'ERR_DEFAULT', 'ERR_IGNORE', 'ERR_LOG',
diff --git a/numpy/core/umath_tests.py b/numpy/core/umath_tests.py
index 28e325b98..90ab17e67 100644
--- a/numpy/core/umath_tests.py
+++ b/numpy/core/umath_tests.py
@@ -2,8 +2,6 @@
Shim for _umath_tests to allow a deprecation period for the new name.
"""
-from __future__ import division, absolute_import, print_function
-
import warnings
# 2018-04-04, numpy 1.15.0
diff --git a/numpy/ctypeslib.py b/numpy/ctypeslib.py
index 58f3ef9d3..ec3cdc33d 100644
--- a/numpy/ctypeslib.py
+++ b/numpy/ctypeslib.py
@@ -49,8 +49,6 @@ Then, we're ready to call ``foo_func``:
>>> _lib.foo_func(out, len(out)) #doctest: +SKIP
"""
-from __future__ import division, absolute_import, print_function
-
__all__ = ['load_library', 'ndpointer', 'ctypes_load_library',
'c_intp', 'as_ctypes', 'as_array']
diff --git a/numpy/distutils/__init__.py b/numpy/distutils/__init__.py
index 8dbb63b28..79974d1c2 100644
--- a/numpy/distutils/__init__.py
+++ b/numpy/distutils/__init__.py
@@ -19,8 +19,6 @@ LAPACK, and for setting include paths and similar build options, please see
"""
-from __future__ import division, absolute_import, print_function
-
# Must import local ccompiler ASAP in order to get
# customized CCompiler.spawn effective.
from . import ccompiler
diff --git a/numpy/distutils/ccompiler.py b/numpy/distutils/ccompiler.py
index 643879023..9ea083774 100644
--- a/numpy/distutils/ccompiler.py
+++ b/numpy/distutils/ccompiler.py
@@ -1,22 +1,23 @@
-from __future__ import division, absolute_import, print_function
-
import os
import re
import sys
-import types
import shlex
import time
import subprocess
from copy import copy
from distutils import ccompiler
-from distutils.ccompiler import *
-from distutils.errors import DistutilsExecError, DistutilsModuleError, \
- DistutilsPlatformError, CompileError
+from distutils.ccompiler import (
+ compiler_class, gen_lib_options, get_default_compiler, new_compiler,
+ CCompiler
+)
+from distutils.errors import (
+ DistutilsExecError, DistutilsModuleError, DistutilsPlatformError,
+ CompileError, UnknownFileError
+)
from distutils.sysconfig import customize_compiler
from distutils.version import LooseVersion
from numpy.distutils import log
-from numpy.distutils.compat import get_exception
from numpy.distutils.exec_command import (
filepath_from_subprocess_output, forward_bytes_to_stdout
)
@@ -85,11 +86,8 @@ def _needs_build(obj, cc_args, extra_postargs, pp_opts):
def replace_method(klass, method_name, func):
- if sys.version_info[0] < 3:
- m = types.MethodType(func, None, klass)
- else:
- # Py3k does not have unbound method anymore, MethodType does not work
- m = lambda self, *args, **kw: func(self, *args, **kw)
+ # Py3k does not have unbound method anymore, MethodType does not work
+ m = lambda self, *args, **kw: func(self, *args, **kw)
setattr(klass, method_name, m)
@@ -277,12 +275,8 @@ def CCompiler_compile(self, sources, output_dir=None, macros=None,
if not sources:
return []
- # FIXME:RELATIVE_IMPORT
- if sys.version_info[0] < 3:
- from .fcompiler import FCompiler, is_f_file, has_f90_header
- else:
- from numpy.distutils.fcompiler import (FCompiler, is_f_file,
- has_f90_header)
+ from numpy.distutils.fcompiler import (FCompiler, is_f_file,
+ has_f90_header)
if isinstance(self, FCompiler):
display = []
for fc in ['f77', 'f90', 'fix']:
@@ -751,15 +745,15 @@ def new_compiler (plat=None,
module_name = "numpy.distutils." + module_name
try:
__import__ (module_name)
- except ImportError:
- msg = str(get_exception())
+ except ImportError as e:
+ msg = str(e)
log.info('%s in numpy.distutils; trying from distutils',
str(msg))
module_name = module_name[6:]
try:
__import__(module_name)
- except ImportError:
- msg = str(get_exception())
+ except ImportError as e:
+ msg = str(e)
raise DistutilsModuleError("can't compile C/C++ code: unable to load module '%s'" % \
module_name)
try:
diff --git a/numpy/distutils/command/__init__.py b/numpy/distutils/command/__init__.py
index 76a260072..3ba501de0 100644
--- a/numpy/distutils/command/__init__.py
+++ b/numpy/distutils/command/__init__.py
@@ -4,8 +4,6 @@ Package containing implementation of all the standard Distutils
commands.
"""
-from __future__ import division, absolute_import, print_function
-
def test_na_writable_attributes_deletion():
a = np.NA(2)
attr = ['payload', 'dtype']
diff --git a/numpy/distutils/command/autodist.py b/numpy/distutils/command/autodist.py
index 9c98b84d8..1475a5e24 100644
--- a/numpy/distutils/command/autodist.py
+++ b/numpy/distutils/command/autodist.py
@@ -1,8 +1,6 @@
"""This module implements additional tests ala autoconf which can be useful.
"""
-from __future__ import division, absolute_import, print_function
-
import textwrap
# We put them here since they could be easily reused outside numpy.distutils
diff --git a/numpy/distutils/command/bdist_rpm.py b/numpy/distutils/command/bdist_rpm.py
index 3e52a503b..682e7a8eb 100644
--- a/numpy/distutils/command/bdist_rpm.py
+++ b/numpy/distutils/command/bdist_rpm.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import os
import sys
if 'setuptools' in sys.modules:
diff --git a/numpy/distutils/command/build.py b/numpy/distutils/command/build.py
index 5a9da1217..a156a7c6e 100644
--- a/numpy/distutils/command/build.py
+++ b/numpy/distutils/command/build.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import os
import sys
from distutils.command.build import build as old_build
diff --git a/numpy/distutils/command/build_clib.py b/numpy/distutils/command/build_clib.py
index 13edf0717..d679b2d03 100644
--- a/numpy/distutils/command/build_clib.py
+++ b/numpy/distutils/command/build_clib.py
@@ -1,7 +1,5 @@
""" Modified version of build_clib that handles fortran source files.
"""
-from __future__ import division, absolute_import, print_function
-
import os
from glob import glob
import shutil
@@ -11,9 +9,10 @@ from distutils.errors import DistutilsSetupError, DistutilsError, \
from numpy.distutils import log
from distutils.dep_util import newer_group
-from numpy.distutils.misc_util import filter_sources, has_f_sources,\
- has_cxx_sources, all_strings, get_lib_source_files, is_sequence, \
- get_numpy_include_dirs
+from numpy.distutils.misc_util import (
+ filter_sources, get_lib_source_files, get_numpy_include_dirs,
+ has_cxx_sources, has_f_sources, is_sequence
+)
# Fix Python distutils bug sf #1718574:
_l = old_build_clib.user_options
diff --git a/numpy/distutils/command/build_ext.py b/numpy/distutils/command/build_ext.py
index cd9b1c6f1..0e07ba9cf 100644
--- a/numpy/distutils/command/build_ext.py
+++ b/numpy/distutils/command/build_ext.py
@@ -1,8 +1,6 @@
""" Modified version of build_ext that handles fortran source files.
"""
-from __future__ import division, absolute_import, print_function
-
import os
import subprocess
from glob import glob
@@ -15,11 +13,11 @@ from distutils.file_util import copy_file
from numpy.distutils import log
from numpy.distutils.exec_command import filepath_from_subprocess_output
-from numpy.distutils.system_info import combine_paths, system_info
-from numpy.distutils.misc_util import filter_sources, has_f_sources, \
- has_cxx_sources, get_ext_source_files, \
- get_numpy_include_dirs, is_sequence, get_build_architecture, \
- msvc_version
+from numpy.distutils.system_info import combine_paths
+from numpy.distutils.misc_util import (
+ filter_sources, get_ext_source_files, get_numpy_include_dirs,
+ has_cxx_sources, has_f_sources, is_sequence
+)
from numpy.distutils.command.config_compiler import show_fortran_compilers
diff --git a/numpy/distutils/command/build_py.py b/numpy/distutils/command/build_py.py
index 54dcde435..d30dc5bf4 100644
--- a/numpy/distutils/command/build_py.py
+++ b/numpy/distutils/command/build_py.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
from distutils.command.build_py import build_py as old_build_py
from numpy.distutils.misc_util import is_string
diff --git a/numpy/distutils/command/build_scripts.py b/numpy/distutils/command/build_scripts.py
index c8b25fc71..d5cadb274 100644
--- a/numpy/distutils/command/build_scripts.py
+++ b/numpy/distutils/command/build_scripts.py
@@ -1,8 +1,6 @@
""" Modified version of build_scripts that handles building scripts from functions.
"""
-from __future__ import division, absolute_import, print_function
-
from distutils.command.build_scripts import build_scripts as old_build_scripts
from numpy.distutils import log
from numpy.distutils.misc_util import is_string
diff --git a/numpy/distutils/command/build_src.py b/numpy/distutils/command/build_src.py
index 3e0522c5f..303d6197c 100644
--- a/numpy/distutils/command/build_src.py
+++ b/numpy/distutils/command/build_src.py
@@ -1,7 +1,5 @@
""" Build swig and f2py sources.
"""
-from __future__ import division, absolute_import, print_function
-
import os
import re
import sys
diff --git a/numpy/distutils/command/config.py b/numpy/distutils/command/config.py
index b9f2fa76e..e54a54449 100644
--- a/numpy/distutils/command/config.py
+++ b/numpy/distutils/command/config.py
@@ -2,13 +2,12 @@
# try_compile call. try_run works but is untested for most of Fortran
# compilers (they must define linker_exe first).
# Pearu Peterson
-from __future__ import division, absolute_import, print_function
-
-import os, signal
-import warnings
-import sys
+import os
+import signal
import subprocess
+import sys
import textwrap
+import warnings
from distutils.command.config import config as old_config
from distutils.command.config import LANG_EXT
@@ -24,7 +23,6 @@ from numpy.distutils.command.autodist import (check_gcc_function_attribute,
check_inline,
check_restrict,
check_compiler_gcc4)
-from numpy.distutils.compat import get_exception
LANG_EXT['f77'] = '.f'
LANG_EXT['f90'] = '.f90'
@@ -52,8 +50,7 @@ class config(old_config):
if not self.compiler.initialized:
try:
self.compiler.initialize()
- except IOError:
- e = get_exception()
+ except IOError as e:
msg = textwrap.dedent("""\
Could not initialize compiler instance: do you have Visual Studio
installed? If you are trying to build with MinGW, please use "python setup.py
@@ -96,8 +93,8 @@ class config(old_config):
self.compiler = self.fcompiler
try:
ret = mth(*((self,)+args))
- except (DistutilsExecError, CompileError):
- str(get_exception())
+ except (DistutilsExecError, CompileError) as e:
+ str(e)
self.compiler = save_compiler
raise CompileError
self.compiler = save_compiler
@@ -495,7 +492,7 @@ class config(old_config):
self._clean()
return exitcode, output
-class GrabStdout(object):
+class GrabStdout:
def __init__(self):
self.sys_stdout = sys.stdout
diff --git a/numpy/distutils/command/config_compiler.py b/numpy/distutils/command/config_compiler.py
index bf170063e..44265bfcc 100644
--- a/numpy/distutils/command/config_compiler.py
+++ b/numpy/distutils/command/config_compiler.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
from distutils.core import Command
from numpy.distutils import log
diff --git a/numpy/distutils/command/develop.py b/numpy/distutils/command/develop.py
index 1410ab2a0..af24baf2e 100644
--- a/numpy/distutils/command/develop.py
+++ b/numpy/distutils/command/develop.py
@@ -3,8 +3,6 @@ generated files (from build_src or build_scripts) are properly converted to real
files with filenames.
"""
-from __future__ import division, absolute_import, print_function
-
from setuptools.command.develop import develop as old_develop
class develop(old_develop):
diff --git a/numpy/distutils/command/egg_info.py b/numpy/distutils/command/egg_info.py
index 18673ece7..14c62b4d1 100644
--- a/numpy/distutils/command/egg_info.py
+++ b/numpy/distutils/command/egg_info.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import sys
from setuptools.command.egg_info import egg_info as _egg_info
diff --git a/numpy/distutils/command/install.py b/numpy/distutils/command/install.py
index c74ae9446..2eff2d145 100644
--- a/numpy/distutils/command/install.py
+++ b/numpy/distutils/command/install.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import sys
if 'setuptools' in sys.modules:
import setuptools.command.install as old_install_mod
diff --git a/numpy/distutils/command/install_clib.py b/numpy/distutils/command/install_clib.py
index 6a73f7e33..aa2e5594c 100644
--- a/numpy/distutils/command/install_clib.py
+++ b/numpy/distutils/command/install_clib.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import os
from distutils.core import Command
from distutils.ccompiler import new_compiler
diff --git a/numpy/distutils/command/install_data.py b/numpy/distutils/command/install_data.py
index 996cf7e40..0a2e68ae1 100644
--- a/numpy/distutils/command/install_data.py
+++ b/numpy/distutils/command/install_data.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import sys
have_setuptools = ('setuptools' in sys.modules)
diff --git a/numpy/distutils/command/install_headers.py b/numpy/distutils/command/install_headers.py
index f3f58aa28..bb4ad563b 100644
--- a/numpy/distutils/command/install_headers.py
+++ b/numpy/distutils/command/install_headers.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import os
from distutils.command.install_headers import install_headers as old_install_headers
diff --git a/numpy/distutils/command/sdist.py b/numpy/distutils/command/sdist.py
index bfaab1c8f..e34193883 100644
--- a/numpy/distutils/command/sdist.py
+++ b/numpy/distutils/command/sdist.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import sys
if 'setuptools' in sys.modules:
from setuptools.command.sdist import sdist as old_sdist
diff --git a/numpy/distutils/compat.py b/numpy/distutils/compat.py
deleted file mode 100644
index 9a81cd392..000000000
--- a/numpy/distutils/compat.py
+++ /dev/null
@@ -1,10 +0,0 @@
-"""Small modules to cope with python 2 vs 3 incompatibilities inside
-numpy.distutils
-
-"""
-from __future__ import division, absolute_import, print_function
-
-import sys
-
-def get_exception():
- return sys.exc_info()[1]
diff --git a/numpy/distutils/conv_template.py b/numpy/distutils/conv_template.py
index 3bcb7b884..d08015fdf 100644
--- a/numpy/distutils/conv_template.py
+++ b/numpy/distutils/conv_template.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
"""
takes templated file .xxx.src and produces .xxx file where .xxx is
.i or .c or .h, using the following template rules
@@ -78,8 +78,6 @@ Example:
3, 3, jim
"""
-from __future__ import division, absolute_import, print_function
-
__all__ = ['process_str', 'process_file']
@@ -87,8 +85,6 @@ import os
import sys
import re
-from numpy.distutils.compat import get_exception
-
# names for replacement that are already global.
global_names = {}
@@ -240,8 +236,7 @@ def parse_string(astr, env, level, line) :
code.append(replace_re.sub(replace, pref))
try :
envlist = parse_loop_header(head)
- except ValueError:
- e = get_exception()
+ except ValueError as e:
msg = "line %d: %s" % (newline, e)
raise ValueError(msg)
for newenv in envlist :
@@ -289,8 +284,7 @@ def process_file(source):
sourcefile = os.path.normcase(source).replace("\\", "\\\\")
try:
code = process_str(''.join(lines))
- except ValueError:
- e = get_exception()
+ except ValueError as e:
raise ValueError('In "%s" loop at %s' % (sourcefile, e))
return '#line 1 "%s"\n%s' % (sourcefile, code)
@@ -327,8 +321,7 @@ def main():
allstr = fid.read()
try:
writestr = process_str(allstr)
- except ValueError:
- e = get_exception()
+ except ValueError as e:
raise ValueError("In %s loop at %s" % (file, e))
outfile.write(writestr)
diff --git a/numpy/distutils/core.py b/numpy/distutils/core.py
index 70cc37caa..d5551f349 100644
--- a/numpy/distutils/core.py
+++ b/numpy/distutils/core.py
@@ -1,7 +1,5 @@
-from __future__ import division, absolute_import, print_function
-
import sys
-from distutils.core import *
+from distutils.core import Distribution
if 'setuptools' in sys.modules:
have_setuptools = True
@@ -27,7 +25,7 @@ from numpy.distutils.command import config, config_compiler, \
build, build_py, build_ext, build_clib, build_src, build_scripts, \
sdist, install_data, install_headers, install, bdist_rpm, \
install_clib
-from numpy.distutils.misc_util import get_data_files, is_sequence, is_string
+from numpy.distutils.misc_util import is_sequence, is_string
numpy_cmdclass = {'build': build.build,
'build_src': build_src.build_src,
diff --git a/numpy/distutils/cpuinfo.py b/numpy/distutils/cpuinfo.py
index bc9728335..51ce3c129 100644
--- a/numpy/distutils/cpuinfo.py
+++ b/numpy/distutils/cpuinfo.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
"""
cpuinfo
@@ -12,28 +12,22 @@ NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
Pearu Peterson
"""
-from __future__ import division, absolute_import, print_function
-
__all__ = ['cpu']
-import sys, re, types
import os
-
-if sys.version_info[0] >= 3:
- from subprocess import getstatusoutput
-else:
- from commands import getstatusoutput
-
-import warnings
import platform
+import re
+import sys
+import types
+import warnings
+
+from subprocess import getstatusoutput
-from numpy.distutils.compat import get_exception
def getoutput(cmd, successful_status=(0,), stacklevel=1):
try:
status, output = getstatusoutput(cmd)
- except EnvironmentError:
- e = get_exception()
+ except EnvironmentError as e:
warnings.warn(str(e), UserWarning, stacklevel=stacklevel)
return False, ""
if os.WIFEXITED(status) and os.WEXITSTATUS(status) in successful_status:
@@ -67,7 +61,7 @@ def key_value_from_command(cmd, sep, successful_status=(0,),
d[l[0]] = l[1]
return d
-class CPUInfoBase(object):
+class CPUInfoBase:
"""Holds CPU information and provides methods for requiring
the availability of various CPU features.
"""
@@ -115,8 +109,7 @@ class LinuxCPUInfo(CPUInfoBase):
info[0]['uname_m'] = output.strip()
try:
fo = open('/proc/cpuinfo')
- except EnvironmentError:
- e = get_exception()
+ except EnvironmentError as e:
warnings.warn(str(e), UserWarning, stacklevel=2)
else:
for line in fo:
@@ -490,10 +483,7 @@ class Win32CPUInfo(CPUInfoBase):
info = []
try:
#XXX: Bad style to use so long `try:...except:...`. Fix it!
- if sys.version_info[0] >= 3:
- import winreg
- else:
- import _winreg as winreg
+ import winreg
prgx = re.compile(r"family\s+(?P<FML>\d+)\s+model\s+(?P<MDL>\d+)"
r"\s+stepping\s+(?P<STP>\d+)", re.IGNORECASE)
@@ -523,8 +513,8 @@ class Win32CPUInfo(CPUInfoBase):
info[-1]["Family"]=int(srch.group("FML"))
info[-1]["Model"]=int(srch.group("MDL"))
info[-1]["Stepping"]=int(srch.group("STP"))
- except Exception:
- print(sys.exc_info()[1], '(ignoring)')
+ except Exception as e:
+ print(e, '(ignoring)')
self.__class__.info = info
def _not_impl(self): pass
diff --git a/numpy/distutils/exec_command.py b/numpy/distutils/exec_command.py
index 712f22666..fb10d2470 100644
--- a/numpy/distutils/exec_command.py
+++ b/numpy/distutils/exec_command.py
@@ -49,8 +49,6 @@ Known bugs:
because the messages are lost at some point.
"""
-from __future__ import division, absolute_import, print_function
-
__all__ = ['exec_command', 'find_executable']
import os
@@ -76,10 +74,6 @@ def filepath_from_subprocess_output(output):
# Another historical oddity
if output[-1:] == '\n':
output = output[:-1]
- # stdio uses bytes in python 2, so to avoid issues, we simply
- # remove all non-ascii characters
- if sys.version_info < (3, 0):
- output = output.encode('ascii', errors='replace')
return output
@@ -91,10 +85,7 @@ def forward_bytes_to_stdout(val):
The assumption is that the subprocess call already returned bytes in
a suitable encoding.
"""
- if sys.version_info.major < 3:
- # python 2 has binary output anyway
- sys.stdout.write(val)
- elif hasattr(sys.stdout, 'buffer'):
+ if hasattr(sys.stdout, 'buffer'):
# use the underlying binary output if there is one
sys.stdout.buffer.write(val)
elif hasattr(sys.stdout, 'encoding'):
@@ -307,11 +298,6 @@ def _exec_command(command, use_shell=None, use_tee = None, **env):
if text[-1:] == '\n':
text = text[:-1]
- # stdio uses bytes in python 2, so to avoid issues, we simply
- # remove all non-ascii characters
- if sys.version_info < (3, 0):
- text = text.encode('ascii', errors='replace')
-
if use_tee and text:
print(text)
return proc.returncode, text
diff --git a/numpy/distutils/extension.py b/numpy/distutils/extension.py
index 872bd5362..67114ef2e 100644
--- a/numpy/distutils/extension.py
+++ b/numpy/distutils/extension.py
@@ -6,15 +6,9 @@ modules in setup scripts.
Overridden to support f2py.
"""
-from __future__ import division, absolute_import, print_function
-
-import sys
import re
from distutils.extension import Extension as old_Extension
-if sys.version_info[0] >= 3:
- basestring = str
-
cxx_ext_re = re.compile(r'.*[.](cpp|cxx|cc)\Z', re.I).match
fortran_pyf_ext_re = re.compile(r'.*[.](f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match
@@ -76,7 +70,7 @@ class Extension(old_Extension):
self.swig_opts = swig_opts or []
# swig_opts is assumed to be a list. Here we handle the case where it
# is specified as a string instead.
- if isinstance(self.swig_opts, basestring):
+ if isinstance(self.swig_opts, str):
import warnings
msg = "swig_opts is specified as a string instead of a list"
warnings.warn(msg, SyntaxWarning, stacklevel=2)
diff --git a/numpy/distutils/fcompiler/__init__.py b/numpy/distutils/fcompiler/__init__.py
index 3723470f3..1c3069363 100644
--- a/numpy/distutils/fcompiler/__init__.py
+++ b/numpy/distutils/fcompiler/__init__.py
@@ -13,15 +13,12 @@ should be a list.
But note that FCompiler.executables is actually a dictionary of commands.
"""
-from __future__ import division, absolute_import, print_function
-
__all__ = ['FCompiler', 'new_fcompiler', 'show_fcompilers',
'dummy_fortran_file']
import os
import sys
import re
-import types
from numpy.compat import open_latin1
@@ -36,7 +33,6 @@ from numpy.distutils import log
from numpy.distutils.misc_util import is_string, all_strings, is_sequence, \
make_temp_file, get_shared_lib_extension
from numpy.distutils.exec_command import find_executable
-from numpy.distutils.compat import get_exception
from numpy.distutils import _shell_utils
from .environment import EnvironmentConfig
@@ -614,8 +610,8 @@ class FCompiler(CCompiler):
src)
try:
self.spawn(command, display=display)
- except DistutilsExecError:
- msg = str(get_exception())
+ except DistutilsExecError as e:
+ msg = str(e)
raise CompileError(msg)
def module_options(self, module_dirs, module_build_dir):
@@ -682,8 +678,8 @@ class FCompiler(CCompiler):
command = linker + ld_args
try:
self.spawn(command)
- except DistutilsExecError:
- msg = str(get_exception())
+ except DistutilsExecError as e:
+ msg = str(e)
raise LinkError(msg)
else:
log.debug("skipping %s (up-to-date)", output_filename)
@@ -931,8 +927,7 @@ def show_fcompilers(dist=None):
c = new_fcompiler(compiler=compiler, verbose=dist.verbose)
c.customize(dist)
v = c.get_version()
- except (DistutilsModuleError, CompilerNotFound):
- e = get_exception()
+ except (DistutilsModuleError, CompilerNotFound) as e:
log.debug("show_fcompilers: %s not found" % (compiler,))
log.debug(repr(e))
diff --git a/numpy/distutils/fcompiler/absoft.py b/numpy/distutils/fcompiler/absoft.py
index d14fee0e1..efe3a4cb5 100644
--- a/numpy/distutils/fcompiler/absoft.py
+++ b/numpy/distutils/fcompiler/absoft.py
@@ -5,8 +5,6 @@
# Notes:
# - when using -g77 then use -DUNDERSCORE_G77 to compile f2py
# generated extension modules (works for f2py v2.45.241_1936 and up)
-from __future__ import division, absolute_import, print_function
-
import os
from numpy.distutils.cpuinfo import cpu
diff --git a/numpy/distutils/fcompiler/compaq.py b/numpy/distutils/fcompiler/compaq.py
index 671b3a55f..6ce590c7c 100644
--- a/numpy/distutils/fcompiler/compaq.py
+++ b/numpy/distutils/fcompiler/compaq.py
@@ -1,12 +1,9 @@
#http://www.compaq.com/fortran/docs/
-from __future__ import division, absolute_import, print_function
-
import os
import sys
from numpy.distutils.fcompiler import FCompiler
-from numpy.distutils.compat import get_exception
from distutils.errors import DistutilsPlatformError
compilers = ['CompaqFCompiler']
@@ -82,19 +79,16 @@ class CompaqVisualFCompiler(FCompiler):
ar_exe = m.lib
except DistutilsPlatformError:
pass
- except AttributeError:
- msg = get_exception()
+        except AttributeError as e:
-            if '_MSVCCompiler__root' in str(msg):
-                print('Ignoring "%s" (I think it is msvccompiler.py bug)' % (msg))
+            if '_MSVCCompiler__root' in str(e):
+                print('Ignoring "%s" (I think it is msvccompiler.py bug)' % (e))
else:
raise
- except IOError:
- e = get_exception()
+ except IOError as e:
if not "vcvarsall.bat" in str(e):
print("Unexpected IOError in", __file__)
raise e
- except ValueError:
- e = get_exception()
+ except ValueError as e:
if not "'path'" in str(e):
print("Unexpected ValueError in", __file__)
raise e
diff --git a/numpy/distutils/fcompiler/environment.py b/numpy/distutils/fcompiler/environment.py
index bb362d483..21a5be003 100644
--- a/numpy/distutils/fcompiler/environment.py
+++ b/numpy/distutils/fcompiler/environment.py
@@ -1,12 +1,9 @@
-from __future__ import division, absolute_import, print_function
-
import os
-import warnings
from distutils.dist import Distribution
__metaclass__ = type
-class EnvironmentConfig(object):
+class EnvironmentConfig:
def __init__(self, distutils_section='ALL', **kw):
self._distutils_section = distutils_section
self._conf_keys = kw
diff --git a/numpy/distutils/fcompiler/g95.py b/numpy/distutils/fcompiler/g95.py
index e7c659b33..e109a972a 100644
--- a/numpy/distutils/fcompiler/g95.py
+++ b/numpy/distutils/fcompiler/g95.py
@@ -1,6 +1,4 @@
# http://g95.sourceforge.net/
-from __future__ import division, absolute_import, print_function
-
from numpy.distutils.fcompiler import FCompiler
compilers = ['G95FCompiler']
diff --git a/numpy/distutils/fcompiler/gnu.py b/numpy/distutils/fcompiler/gnu.py
index 965c67041..796dff351 100644
--- a/numpy/distutils/fcompiler/gnu.py
+++ b/numpy/distutils/fcompiler/gnu.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import re
import os
import sys
@@ -12,8 +10,6 @@ import subprocess
from subprocess import Popen, PIPE, STDOUT
from numpy.distutils.exec_command import filepath_from_subprocess_output
from numpy.distutils.fcompiler import FCompiler
-from numpy.distutils.compat import get_exception
-from numpy.distutils.system_info import system_info
compilers = ['GnuFCompiler', 'Gnu95FCompiler']
@@ -126,26 +122,17 @@ class GnuFCompiler(FCompiler):
# error checking.
if not target:
# If MACOSX_DEPLOYMENT_TARGET is not set in the environment,
- # we try to get it first from the Python Makefile and then we
- # fall back to setting it to 10.3 to maximize the set of
- # versions we can work with. This is a reasonable default
+ # we try to get it first from sysconfig and then
+ # fall back to setting it to 10.9 This is a reasonable default
# even when using the official Python dist and those derived
# from it.
- import distutils.sysconfig as sc
- g = {}
- try:
- get_makefile_filename = sc.get_makefile_filename
- except AttributeError:
- pass # i.e. PyPy
- else:
- filename = get_makefile_filename()
- sc.parse_makefile(filename, g)
- target = g.get('MACOSX_DEPLOYMENT_TARGET', '10.3')
- os.environ['MACOSX_DEPLOYMENT_TARGET'] = target
- if target == '10.3':
- s = 'Env. variable MACOSX_DEPLOYMENT_TARGET set to 10.3'
+ import sysconfig
+ target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
+ if not target:
+ target = '10.9'
+ s = f'Env. variable MACOSX_DEPLOYMENT_TARGET set to {target}'
warnings.warn(s, stacklevel=2)
-
+ os.environ['MACOSX_DEPLOYMENT_TARGET'] = target
opt.extend(['-undefined', 'dynamic_lookup', '-bundle'])
else:
opt.append("-shared")
@@ -415,8 +402,7 @@ class Gnu95FCompiler(GnuFCompiler):
break
h.update(block)
text = base64.b32encode(h.digest())
- if sys.version_info[0] >= 3:
- text = text.decode('ascii')
+ text = text.decode('ascii')
return text.rstrip('=')
def _link_wrapper_lib(self, objects, output_dir, extra_dll_dir,
@@ -560,5 +546,5 @@ if __name__ == '__main__':
print(customized_fcompiler('gnu').get_version())
try:
print(customized_fcompiler('g95').get_version())
- except Exception:
- print(get_exception())
+ except Exception as e:
+ print(e)
diff --git a/numpy/distutils/fcompiler/hpux.py b/numpy/distutils/fcompiler/hpux.py
index 51bad548a..09e6483bf 100644
--- a/numpy/distutils/fcompiler/hpux.py
+++ b/numpy/distutils/fcompiler/hpux.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
from numpy.distutils.fcompiler import FCompiler
compilers = ['HPUXFCompiler']
diff --git a/numpy/distutils/fcompiler/ibm.py b/numpy/distutils/fcompiler/ibm.py
index 70d2132e1..4a83682e5 100644
--- a/numpy/distutils/fcompiler/ibm.py
+++ b/numpy/distutils/fcompiler/ibm.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import os
import re
import sys
diff --git a/numpy/distutils/fcompiler/intel.py b/numpy/distutils/fcompiler/intel.py
index 51f681274..d84f38c76 100644
--- a/numpy/distutils/fcompiler/intel.py
+++ b/numpy/distutils/fcompiler/intel.py
@@ -1,6 +1,4 @@
# http://developer.intel.com/software/products/compilers/flin/
-from __future__ import division, absolute_import, print_function
-
import sys
from numpy.distutils.ccompiler import simple_version_match
diff --git a/numpy/distutils/fcompiler/lahey.py b/numpy/distutils/fcompiler/lahey.py
index 1beb662f4..e92583826 100644
--- a/numpy/distutils/fcompiler/lahey.py
+++ b/numpy/distutils/fcompiler/lahey.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import os
from numpy.distutils.fcompiler import FCompiler
diff --git a/numpy/distutils/fcompiler/mips.py b/numpy/distutils/fcompiler/mips.py
index da337b24a..a09738045 100644
--- a/numpy/distutils/fcompiler/mips.py
+++ b/numpy/distutils/fcompiler/mips.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
from numpy.distutils.cpuinfo import cpu
from numpy.distutils.fcompiler import FCompiler
diff --git a/numpy/distutils/fcompiler/nag.py b/numpy/distutils/fcompiler/nag.py
index cb71d548c..908e724e6 100644
--- a/numpy/distutils/fcompiler/nag.py
+++ b/numpy/distutils/fcompiler/nag.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import sys
import re
from numpy.distutils.fcompiler import FCompiler
diff --git a/numpy/distutils/fcompiler/none.py b/numpy/distutils/fcompiler/none.py
index bdeea1560..ef411fffc 100644
--- a/numpy/distutils/fcompiler/none.py
+++ b/numpy/distutils/fcompiler/none.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
from numpy.distutils.fcompiler import FCompiler
from numpy.distutils import customized_fcompiler
diff --git a/numpy/distutils/fcompiler/pathf95.py b/numpy/distutils/fcompiler/pathf95.py
index 5de86f63a..0768cb12e 100644
--- a/numpy/distutils/fcompiler/pathf95.py
+++ b/numpy/distutils/fcompiler/pathf95.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
from numpy.distutils.fcompiler import FCompiler
compilers = ['PathScaleFCompiler']
diff --git a/numpy/distutils/fcompiler/pg.py b/numpy/distutils/fcompiler/pg.py
index 9c51947fd..eb628cb63 100644
--- a/numpy/distutils/fcompiler/pg.py
+++ b/numpy/distutils/fcompiler/pg.py
@@ -1,9 +1,7 @@
# http://www.pgroup.com
-from __future__ import division, absolute_import, print_function
-
import sys
-from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file
+from numpy.distutils.fcompiler import FCompiler
from sys import platform
from os.path import join, dirname, normpath
@@ -64,72 +62,60 @@ class PGroupFCompiler(FCompiler):
return '-R%s' % dir
-if sys.version_info >= (3, 5):
- import functools
-
- class PGroupFlangCompiler(FCompiler):
- compiler_type = 'flang'
- description = 'Portland Group Fortran LLVM Compiler'
- version_pattern = r'\s*(flang|clang) version (?P<version>[\d.-]+).*'
-
- ar_exe = 'lib.exe'
- possible_executables = ['flang']
-
- executables = {
- 'version_cmd': ["<F77>", "--version"],
- 'compiler_f77': ["flang"],
- 'compiler_fix': ["flang"],
- 'compiler_f90': ["flang"],
- 'linker_so': [None],
- 'archiver': [ar_exe, "/verbose", "/OUT:"],
- 'ranlib': None
- }
+import functools
- library_switch = '/OUT:' # No space after /OUT:!
- module_dir_switch = '-module ' # Don't remove ending space!
+class PGroupFlangCompiler(FCompiler):
+ compiler_type = 'flang'
+ description = 'Portland Group Fortran LLVM Compiler'
+ version_pattern = r'\s*(flang|clang) version (?P<version>[\d.-]+).*'
- def get_libraries(self):
- opt = FCompiler.get_libraries(self)
- opt.extend(['flang', 'flangrti', 'ompstub'])
- return opt
+ ar_exe = 'lib.exe'
+ possible_executables = ['flang']
- @functools.lru_cache(maxsize=128)
- def get_library_dirs(self):
- """List of compiler library directories."""
- opt = FCompiler.get_library_dirs(self)
- flang_dir = dirname(self.executables['compiler_f77'][0])
- opt.append(normpath(join(flang_dir, '..', 'lib')))
+ executables = {
+ 'version_cmd': ["<F77>", "--version"],
+ 'compiler_f77': ["flang"],
+ 'compiler_fix': ["flang"],
+ 'compiler_f90': ["flang"],
+ 'linker_so': [None],
+ 'archiver': [ar_exe, "/verbose", "/OUT:"],
+ 'ranlib': None
+ }
- return opt
+ library_switch = '/OUT:' # No space after /OUT:!
+ module_dir_switch = '-module ' # Don't remove ending space!
- def get_flags(self):
- return []
+ def get_libraries(self):
+ opt = FCompiler.get_libraries(self)
+ opt.extend(['flang', 'flangrti', 'ompstub'])
+ return opt
- def get_flags_free(self):
- return []
+ @functools.lru_cache(maxsize=128)
+ def get_library_dirs(self):
+ """List of compiler library directories."""
+ opt = FCompiler.get_library_dirs(self)
+ flang_dir = dirname(self.executables['compiler_f77'][0])
+ opt.append(normpath(join(flang_dir, '..', 'lib')))
- def get_flags_debug(self):
- return ['-g']
+ return opt
- def get_flags_opt(self):
- return ['-O3']
+ def get_flags(self):
+ return []
- def get_flags_arch(self):
- return []
+ def get_flags_free(self):
+ return []
- def runtime_library_dir_option(self, dir):
- raise NotImplementedError
+ def get_flags_debug(self):
+ return ['-g']
-else:
- from numpy.distutils.fcompiler import CompilerNotFound
+ def get_flags_opt(self):
+ return ['-O3']
- # No point in supporting on older Pythons because not ABI compatible
- class PGroupFlangCompiler(FCompiler):
- compiler_type = 'flang'
- description = 'Portland Group Fortran LLVM Compiler'
+ def get_flags_arch(self):
+ return []
- def get_version(self):
- raise CompilerNotFound('Flang unsupported on Python < 3.5')
+ def runtime_library_dir_option(self, dir):
+ raise NotImplementedError
if __name__ == '__main__':
diff --git a/numpy/distutils/fcompiler/sun.py b/numpy/distutils/fcompiler/sun.py
index 561ea854f..d039f0b25 100644
--- a/numpy/distutils/fcompiler/sun.py
+++ b/numpy/distutils/fcompiler/sun.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
from numpy.distutils.ccompiler import simple_version_match
from numpy.distutils.fcompiler import FCompiler
diff --git a/numpy/distutils/fcompiler/vast.py b/numpy/distutils/fcompiler/vast.py
index adc1591bd..92a1647ba 100644
--- a/numpy/distutils/fcompiler/vast.py
+++ b/numpy/distutils/fcompiler/vast.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import os
from numpy.distutils.fcompiler.gnu import GnuFCompiler
diff --git a/numpy/distutils/from_template.py b/numpy/distutils/from_template.py
index c5c1163c6..070b7d8b8 100644
--- a/numpy/distutils/from_template.py
+++ b/numpy/distutils/from_template.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
"""
process_file(filename)
@@ -45,8 +45,6 @@ process_file(filename)
<ctypereal=float,double,\\0,\\1>
"""
-from __future__ import division, absolute_import, print_function
-
__all__ = ['process_str', 'process_file']
import os
diff --git a/numpy/distutils/intelccompiler.py b/numpy/distutils/intelccompiler.py
index 3386775ee..0388ad577 100644
--- a/numpy/distutils/intelccompiler.py
+++ b/numpy/distutils/intelccompiler.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import platform
from distutils.unixccompiler import UnixCCompiler
diff --git a/numpy/distutils/lib2def.py b/numpy/distutils/lib2def.py
index 2d013a1e3..820ed71f5 100644
--- a/numpy/distutils/lib2def.py
+++ b/numpy/distutils/lib2def.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import re
import sys
import subprocess
@@ -24,7 +22,7 @@ __version__ = '0.1a'
py_ver = "%d%d" % tuple(sys.version_info[:2])
-DEFAULT_NM = 'nm -Cs'
+DEFAULT_NM = ['nm', '-Cs']
DEF_HEADER = """LIBRARY python%s.dll
;CODE PRELOAD MOVEABLE DISCARDABLE
@@ -61,13 +59,16 @@ libfile, deffile = parse_cmd()"""
deffile = None
return libfile, deffile
-def getnm(nm_cmd = ['nm', '-Cs', 'python%s.lib' % py_ver]):
+def getnm(nm_cmd=['nm', '-Cs', 'python%s.lib' % py_ver], shell=True):
"""Returns the output of nm_cmd via a pipe.
-nm_output = getnam(nm_cmd = 'nm -Cs py_lib')"""
- f = subprocess.Popen(nm_cmd, shell=True, stdout=subprocess.PIPE, universal_newlines=True)
- nm_output = f.stdout.read()
- f.stdout.close()
+nm_output = getnm(nm_cmd = 'nm -Cs py_lib')"""
+ p = subprocess.Popen(nm_cmd, shell=shell, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE, universal_newlines=True)
+ nm_output, nm_err = p.communicate()
+ if p.returncode != 0:
+ raise RuntimeError('failed to run "%s": "%s"' % (
+ ' '.join(nm_cmd), nm_err))
return nm_output
def parse_nm(nm_output):
@@ -109,7 +110,7 @@ if __name__ == '__main__':
deffile = sys.stdout
else:
deffile = open(deffile, 'w')
- nm_cmd = [str(DEFAULT_NM), str(libfile)]
- nm_output = getnm(nm_cmd)
+ nm_cmd = DEFAULT_NM + [str(libfile)]
+ nm_output = getnm(nm_cmd, shell=False)
dlist, flist = parse_nm(nm_output)
output_def(dlist, flist, DEF_HEADER, deffile)
diff --git a/numpy/distutils/line_endings.py b/numpy/distutils/line_endings.py
index fe8fd1b0f..686e5ebd9 100644
--- a/numpy/distutils/line_endings.py
+++ b/numpy/distutils/line_endings.py
@@ -1,9 +1,10 @@
""" Functions for converting from DOS to UNIX line endings
"""
-from __future__ import division, absolute_import, print_function
+import os
+import re
+import sys
-import sys, re, os
def dos2unix(file):
"Replace CRLF with LF in argument files. Print names of changed files."
diff --git a/numpy/distutils/log.py b/numpy/distutils/log.py
index ff7de86b1..a8113b9c6 100644
--- a/numpy/distutils/log.py
+++ b/numpy/distutils/log.py
@@ -1,17 +1,11 @@
-# Colored log, requires Python 2.3 or up.
-from __future__ import division, absolute_import, print_function
-
+# Colored log
import sys
-from distutils.log import *
+from distutils.log import * # noqa: F403
from distutils.log import Log as old_Log
from distutils.log import _global_log
-if sys.version_info[0] < 3:
- from .misc_util import (red_text, default_text, cyan_text, green_text,
- is_sequence, is_string)
-else:
- from numpy.distutils.misc_util import (red_text, default_text, cyan_text,
- green_text, is_sequence, is_string)
+from numpy.distutils.misc_util import (red_text, default_text, cyan_text,
+ green_text, is_sequence, is_string)
def _fix_args(args,flag=1):
diff --git a/numpy/distutils/mingw32ccompiler.py b/numpy/distutils/mingw32ccompiler.py
index 075858cfe..7cb6fadcc 100644
--- a/numpy/distutils/mingw32ccompiler.py
+++ b/numpy/distutils/mingw32ccompiler.py
@@ -7,8 +7,6 @@ Support code for building Python extensions on Windows.
# 3. Force windows to use g77
"""
-from __future__ import division, absolute_import, print_function
-
import os
import sys
import subprocess
@@ -16,12 +14,8 @@ import re
import textwrap
# Overwrite certain distutils.ccompiler functions:
-import numpy.distutils.ccompiler
-
-if sys.version_info[0] < 3:
- from . import log
-else:
- from numpy.distutils import log
+import numpy.distutils.ccompiler # noqa: F401
+from numpy.distutils import log
# NT stuff
# 1. Make sure libpython<version>.a exists for gcc. If not, build it.
# 2. Force windows to use gcc (we're struggling with MSVC and g77 support)
@@ -32,8 +26,7 @@ import distutils.cygwinccompiler
from distutils.version import StrictVersion
from distutils.unixccompiler import UnixCCompiler
from distutils.msvccompiler import get_build_version as get_build_msvc_version
-from distutils.errors import (DistutilsExecError, CompileError,
- UnknownFileError)
+from distutils.errors import UnknownFileError
from numpy.distutils.misc_util import (msvc_runtime_library,
msvc_runtime_version,
msvc_runtime_major,
@@ -71,10 +64,10 @@ class Mingw32CCompiler(distutils.cygwinccompiler.CygwinCCompiler):
# we need to support 3.2 which doesn't match the standard
# get_versions methods regex
if self.gcc_version is None:
- p = subprocess.Popen(['gcc', '-dumpversion'], shell=True,
- stdout=subprocess.PIPE)
- out_string = p.stdout.read()
- p.stdout.close()
+            try:
+                out_string = subprocess.check_output(['gcc', '-dumpversion'],
+                                                     universal_newlines=True)
+            except (OSError, subprocess.CalledProcessError):
+                out_string = ""  # ignore failures to match old behavior
result = re.search(r'(\d+\.\d+)', out_string)
if result:
self.gcc_version = StrictVersion(result.group(1))
@@ -285,8 +278,8 @@ def find_python_dll():
raise ValueError("%s not found in %s" % (dllname, lib_dirs))
def dump_table(dll):
- st = subprocess.Popen(["objdump.exe", "-p", dll], stdout=subprocess.PIPE)
- return st.stdout.readlines()
+ st = subprocess.check_output(["objdump.exe", "-p", dll])
+ return st.split(b'\n')
def generate_def(dll, dfile):
"""Given a dll file location, get all its exported symbols and dump them
@@ -311,15 +304,14 @@ def generate_def(dll, dfile):
if len(syms) == 0:
log.warn('No symbols found in %s' % dll)
- d = open(dfile, 'w')
- d.write('LIBRARY %s\n' % os.path.basename(dll))
- d.write(';CODE PRELOAD MOVEABLE DISCARDABLE\n')
- d.write(';DATA PRELOAD SINGLE\n')
- d.write('\nEXPORTS\n')
- for s in syms:
- #d.write('@%d %s\n' % (s[0], s[1]))
- d.write('%s\n' % s[1])
- d.close()
+ with open(dfile, 'w') as d:
+ d.write('LIBRARY %s\n' % os.path.basename(dll))
+ d.write(';CODE PRELOAD MOVEABLE DISCARDABLE\n')
+ d.write(';DATA PRELOAD SINGLE\n')
+ d.write('\nEXPORTS\n')
+ for s in syms:
+ #d.write('@%d %s\n' % (s[0], s[1]))
+ d.write('%s\n' % s[1])
def find_dll(dll_name):
@@ -472,7 +464,7 @@ def _build_import_library_amd64():
# generate import library from this symbol list
cmd = ['dlltool', '-d', def_file, '-l', out_file]
- subprocess.Popen(cmd)
+ subprocess.check_call(cmd)
def _build_import_library_x86():
""" Build the import libraries for Mingw32-gcc on Windows
@@ -506,16 +498,19 @@ def _build_import_library_x86():
def_name = "python%d%d.def" % tuple(sys.version_info[:2])
def_file = os.path.join(sys.prefix, 'libs', def_name)
- nm_cmd = '%s %s' % (lib2def.DEFAULT_NM, lib_file)
- nm_output = lib2def.getnm(nm_cmd)
+ nm_output = lib2def.getnm(
+ lib2def.DEFAULT_NM + [lib_file], shell=False)
dlist, flist = lib2def.parse_nm(nm_output)
- lib2def.output_def(dlist, flist, lib2def.DEF_HEADER, open(def_file, 'w'))
+ with open(def_file, 'w') as fid:
+ lib2def.output_def(dlist, flist, lib2def.DEF_HEADER, fid)
dll_name = find_python_dll ()
- args = (dll_name, def_file, out_file)
- cmd = 'dlltool --dllname "%s" --def "%s" --output-lib "%s"' % args
- status = os.system(cmd)
- # for now, fail silently
+
+ cmd = ["dlltool",
+ "--dllname", dll_name,
+ "--def", def_file,
+ "--output-lib", out_file]
+    status = subprocess.call(cmd)
if status:
log.warn('Failed to build import library for gcc. Linking will fail.')
return
@@ -548,6 +543,8 @@ if sys.platform == 'win32':
# Value from msvcrt.CRT_ASSEMBLY_VERSION under Python 3.3.0
# on Windows XP:
_MSVCRVER_TO_FULLVER['100'] = "10.0.30319.460"
+        # Python 3.7 uses 1415, but get_build_version returns 140 -- reason unknown
+ _MSVCRVER_TO_FULLVER['140'] = "14.15.26726.0"
if hasattr(msvcrt, "CRT_ASSEMBLY_VERSION"):
major, minor, rest = msvcrt.CRT_ASSEMBLY_VERSION.split(".", 2)
_MSVCRVER_TO_FULLVER[major + minor] = msvcrt.CRT_ASSEMBLY_VERSION
diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py
index 7ba8ad862..9f9e9f1ac 100644
--- a/numpy/distutils/misc_util.py
+++ b/numpy/distutils/misc_util.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import os
import re
import sys
@@ -34,8 +32,6 @@ def clean_up_temporary_directory():
atexit.register(clean_up_temporary_directory)
-from numpy.distutils.compat import get_exception
-from numpy.compat import basestring
from numpy.compat import npy_load_module
__all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict',
@@ -51,7 +47,7 @@ __all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict',
'quote_args', 'get_build_architecture', 'get_info', 'get_pkg_info',
'get_num_build_jobs']
-class InstallableLib(object):
+class InstallableLib:
"""
Container to hold information on an installable library.
@@ -168,7 +164,6 @@ def get_path_from_frame(frame, parent_path=None):
# we're probably running setup.py as execfile("setup.py")
# (likely we're building an egg)
d = os.path.abspath('.')
- # hmm, should we use sys.argv[0] like in __builtin__ case?
if parent_path is not None:
d = rel_path(d, parent_path)
@@ -453,7 +448,7 @@ def _get_f90_modules(source):
return modules
def is_string(s):
- return isinstance(s, basestring)
+ return isinstance(s, str)
def all_strings(lst):
"""Return True if all items in lst are string objects. """
@@ -728,7 +723,7 @@ def get_frame(level=0):
######################
-class Configuration(object):
+class Configuration:
_list_keys = ['packages', 'ext_modules', 'data_files', 'include_dirs',
'libraries', 'headers', 'scripts', 'py_modules',
@@ -926,18 +921,8 @@ class Configuration(object):
else:
pn = dot_join(*([parent_name] + subpackage_name.split('.')[:-1]))
args = (pn,)
- def fix_args_py2(args):
- if setup_module.configuration.__code__.co_argcount > 1:
- args = args + (self.top_path,)
- return args
- def fix_args_py3(args):
- if setup_module.configuration.__code__.co_argcount > 1:
- args = args + (self.top_path,)
- return args
- if sys.version_info[0] < 3:
- args = fix_args_py2(args)
- else:
- args = fix_args_py3(args)
+ if setup_module.configuration.__code__.co_argcount > 1:
+ args = args + (self.top_path,)
config = setup_module.configuration(*args)
if config.name!=dot_join(parent_name, subpackage_name):
self.warn('Subpackage %r configuration returned as %r' % \
@@ -1868,8 +1853,7 @@ class Configuration(object):
"""Return path's SVN revision number.
"""
try:
- output = subprocess.check_output(
- ['svnversion'], shell=True, cwd=path)
+ output = subprocess.check_output(['svnversion'], cwd=path)
except (subprocess.CalledProcessError, OSError):
pass
else:
@@ -1899,7 +1883,7 @@ class Configuration(object):
"""
try:
output = subprocess.check_output(
- ['hg identify --num'], shell=True, cwd=path)
+ ['hg', 'identify', '--num'], cwd=path)
except (subprocess.CalledProcessError, OSError):
pass
else:
@@ -1972,9 +1956,8 @@ class Configuration(object):
try:
version_module = npy_load_module('_'.join(n.split('.')),
fn, info)
- except ImportError:
- msg = get_exception()
- self.warn(str(msg))
+ except ImportError as e:
+ self.warn(str(e))
version_module = None
if version_module is None:
continue
@@ -2246,10 +2229,7 @@ def get_info(pkgname, dirs=None):
return info
def is_bootstrapping():
- if sys.version_info[0] >= 3:
- import builtins
- else:
- import __builtin__ as builtins
+ import builtins
try:
builtins.__NUMPY_SETUP__
@@ -2329,8 +2309,11 @@ def generate_config_py(target):
extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs')
if sys.platform == 'win32' and os.path.isdir(extra_dll_dir):
- os.environ.setdefault('PATH', '')
- os.environ['PATH'] += os.pathsep + extra_dll_dir
+ if sys.version_info >= (3, 8):
+ os.add_dll_directory(extra_dll_dir)
+ else:
+ os.environ.setdefault('PATH', '')
+ os.environ['PATH'] += os.pathsep + extra_dll_dir
"""))
@@ -2342,6 +2325,43 @@ def generate_config_py(target):
return g.get(name, g.get(name + "_info", {}))
def show():
+ """
+ Show libraries in the system on which NumPy was built.
+
+ Print information about various resources (libraries, library
+ directories, include directories, etc.) in the system on which
+ NumPy was built.
+
+ See Also
+ --------
+ get_include : Returns the directory containing NumPy C
+ header files.
+
+ Notes
+ -----
+ Classes specifying the information to be printed are defined
+ in the `numpy.distutils.system_info` module.
+
+ Information may include:
+
+ * ``language``: language used to write the libraries (mostly
+ C or f77)
+ * ``libraries``: names of libraries found in the system
+ * ``library_dirs``: directories containing the libraries
+ * ``include_dirs``: directories containing library header files
+ * ``src_dirs``: directories containing library source files
+ * ``define_macros``: preprocessor macros used by
+ ``distutils.setup``
+
+ Examples
+ --------
+ >>> np.show_config()
+ blas_opt_info:
+ language = c
+ define_macros = [('HAVE_CBLAS', None)]
+ libraries = ['openblas', 'openblas']
+ library_dirs = ['/usr/local/lib']
+ """
for name,info_dict in globals().items():
if name[0] == "_" or type(info_dict) is not type({}): continue
print(name + ":")
diff --git a/numpy/distutils/msvc9compiler.py b/numpy/distutils/msvc9compiler.py
index e9cc334a5..68239495d 100644
--- a/numpy/distutils/msvc9compiler.py
+++ b/numpy/distutils/msvc9compiler.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import os
from distutils.msvc9compiler import MSVCCompiler as _MSVCCompiler
diff --git a/numpy/distutils/msvccompiler.py b/numpy/distutils/msvccompiler.py
index 0cb4bf979..681a254b8 100644
--- a/numpy/distutils/msvccompiler.py
+++ b/numpy/distutils/msvccompiler.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import os
from distutils.msvccompiler import MSVCCompiler as _MSVCCompiler
diff --git a/numpy/distutils/npy_pkg_config.py b/numpy/distutils/npy_pkg_config.py
index 48584b4c4..951ce5fb8 100644
--- a/numpy/distutils/npy_pkg_config.py
+++ b/numpy/distutils/npy_pkg_config.py
@@ -1,13 +1,8 @@
-from __future__ import division, absolute_import, print_function
-
import sys
import re
import os
-if sys.version_info[0] < 3:
- from ConfigParser import RawConfigParser
-else:
- from configparser import RawConfigParser
+from configparser import RawConfigParser
__all__ = ['FormatError', 'PkgNotFound', 'LibraryInfo', 'VariableSet',
'read_config', 'parse_flags']
@@ -78,7 +73,7 @@ def parse_flags(line):
def _escape_backslash(val):
return val.replace('\\', '\\\\')
-class LibraryInfo(object):
+class LibraryInfo:
"""
Object containing build information about a library.
@@ -150,7 +145,7 @@ class LibraryInfo(object):
return "\n".join(m)
-class VariableSet(object):
+class VariableSet:
"""
Container object for the variables defined in a config file.
@@ -380,7 +375,6 @@ def read_config(pkgname, dirs=None):
# pkg-config simple emulator - useful for debugging, and maybe later to query
# the system
if __name__ == '__main__':
- import sys
from optparse import OptionParser
import glob
diff --git a/numpy/distutils/numpy_distribution.py b/numpy/distutils/numpy_distribution.py
index 6ae19d16b..ea8182659 100644
--- a/numpy/distutils/numpy_distribution.py
+++ b/numpy/distutils/numpy_distribution.py
@@ -1,6 +1,4 @@
# XXX: Handle setuptools ?
-from __future__ import division, absolute_import, print_function
-
from distutils.core import Distribution
# This class is used because we add new files (sconscripts, and so on) with the
diff --git a/numpy/distutils/pathccompiler.py b/numpy/distutils/pathccompiler.py
index fc9872db3..48051810e 100644
--- a/numpy/distutils/pathccompiler.py
+++ b/numpy/distutils/pathccompiler.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
from distutils.unixccompiler import UnixCCompiler
class PathScaleCCompiler(UnixCCompiler):
diff --git a/numpy/distutils/setup.py b/numpy/distutils/setup.py
index 82a53bd08..69d35f5c2 100644
--- a/numpy/distutils/setup.py
+++ b/numpy/distutils/setup.py
@@ -1,6 +1,4 @@
-#!/usr/bin/env python
-from __future__ import division, print_function
-
+#!/usr/bin/env python3
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('distutils', parent_package, top_path)
diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py
index 5fd1003ab..3a6a7b29d 100644
--- a/numpy/distutils/system_info.py
+++ b/numpy/distutils/system_info.py
@@ -1,51 +1,8 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
"""
This file defines a set of system_info classes for getting
information about various resources (libraries, library directories,
-include directories, etc.) in the system. Currently, the following
-classes are available:
-
- atlas_info
- atlas_threads_info
- atlas_blas_info
- atlas_blas_threads_info
- lapack_atlas_info
- lapack_atlas_threads_info
- atlas_3_10_info
- atlas_3_10_threads_info
- atlas_3_10_blas_info,
- atlas_3_10_blas_threads_info,
- lapack_atlas_3_10_info
- lapack_atlas_3_10_threads_info
- flame_info
- blas_info
- lapack_info
- openblas_info
- blis_info
- blas_opt_info # usage recommended
- lapack_opt_info # usage recommended
- fftw_info,dfftw_info,sfftw_info
- fftw_threads_info,dfftw_threads_info,sfftw_threads_info
- djbfft_info
- x11_info
- lapack_src_info
- blas_src_info
- numpy_info
- numarray_info
- numpy_info
- boost_python_info
- agg2_info
- wx_info
- gdk_pixbuf_xlib_2_info
- gdk_pixbuf_2_info
- gdk_x11_2_info
- gtkp_x11_2_info
- gtkp_2_info
- xft_info
- freetype2_info
- umfpack_info
-
-Usage:
+include directories, etc.) in the system. Usage:
info_dict = get_info(<name>)
where <name> is a string 'atlas','x11','fftw','lapack','blas',
'lapack_src', 'blas_src', etc. For a complete list of allowed names,
@@ -73,19 +30,94 @@ The file 'site.cfg' is looked for in
The first one found is used to get system configuration options The
format is that used by ConfigParser (i.e., Windows .INI style). The
-section ALL has options that are the default for each section. The
-available sections are fftw, atlas, and x11. Appropriate defaults are
-used if nothing is specified.
+section ALL is not intended for general use.
+
+Appropriate defaults are used if nothing is specified.
The order of finding the locations of resources is the following:
1. environment variable
2. section in site.cfg
- 3. ALL section in site.cfg
+ 3. DEFAULT section in site.cfg
+ 4. System default search paths (see ``default_*`` variables below).
Only the first complete match is returned.
+Currently, the following classes are available, along with their section names:
+
+ Numeric_info:Numeric
+ _numpy_info:Numeric
+ _pkg_config_info:None
+ accelerate_info:accelerate
+ agg2_info:agg2
+ amd_info:amd
+ atlas_3_10_blas_info:atlas
+ atlas_3_10_blas_threads_info:atlas
+ atlas_3_10_info:atlas
+ atlas_3_10_threads_info:atlas
+ atlas_blas_info:atlas
+ atlas_blas_threads_info:atlas
+ atlas_info:atlas
+ atlas_threads_info:atlas
+ blas64__opt_info:ALL # usage recommended (general ILP64 BLAS, 64_ symbol suffix)
+ blas_ilp64_opt_info:ALL # usage recommended (general ILP64 BLAS)
+ blas_ilp64_plain_opt_info:ALL # usage recommended (general ILP64 BLAS, no symbol suffix)
+ blas_info:blas
+ blas_mkl_info:mkl
+ blas_opt_info:ALL # usage recommended
+ blas_src_info:blas_src
+ blis_info:blis
+ boost_python_info:boost_python
+ dfftw_info:fftw
+ dfftw_threads_info:fftw
+ djbfft_info:djbfft
+ f2py_info:ALL
+ fft_opt_info:ALL
+ fftw2_info:fftw
+ fftw3_info:fftw3
+ fftw_info:fftw
+ fftw_threads_info:fftw
+ flame_info:flame
+ freetype2_info:freetype2
+ gdk_2_info:gdk_2
+ gdk_info:gdk
+ gdk_pixbuf_2_info:gdk_pixbuf_2
+ gdk_pixbuf_xlib_2_info:gdk_pixbuf_xlib_2
+ gdk_x11_2_info:gdk_x11_2
+ gtkp_2_info:gtkp_2
+ gtkp_x11_2_info:gtkp_x11_2
+ lapack64__opt_info:ALL # usage recommended (general ILP64 LAPACK, 64_ symbol suffix)
+ lapack_atlas_3_10_info:atlas
+ lapack_atlas_3_10_threads_info:atlas
+ lapack_atlas_info:atlas
+ lapack_atlas_threads_info:atlas
+ lapack_ilp64_opt_info:ALL # usage recommended (general ILP64 LAPACK)
+ lapack_ilp64_plain_opt_info:ALL # usage recommended (general ILP64 LAPACK, no symbol suffix)
+ lapack_info:lapack
+ lapack_mkl_info:mkl
+ lapack_opt_info:ALL # usage recommended
+ lapack_src_info:lapack_src
+ mkl_info:mkl
+ numarray_info:numarray
+ numerix_info:numerix
+ numpy_info:numpy
+ openblas64__info:openblas64_
+ openblas64__lapack_info:openblas64_
+ openblas_clapack_info:openblas
+ openblas_ilp64_info:openblas_ilp64
+ openblas_ilp64_lapack_info:openblas_ilp64
+ openblas_info:openblas
+ openblas_lapack_info:openblas
+ sfftw_info:fftw
+ sfftw_threads_info:fftw
+ system_info:ALL
+ umfpack_info:umfpack
+ wx_info:wx
+ x11_info:x11
+ xft_info:xft
+
Example:
----------
-[ALL]
+[DEFAULT]
+# default section
library_dirs = /usr/lib:/usr/local/lib:/opt/lib
include_dirs = /usr/include:/usr/local/include:/opt/include
src_dirs = /usr/local/src:/opt/src
@@ -120,8 +152,6 @@ this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
"""
-from __future__ import division, absolute_import, print_function
-
import sys
import os
import re
@@ -132,12 +162,8 @@ import textwrap
from glob import glob
from functools import reduce
-if sys.version_info[0] < 3:
- from ConfigParser import NoOptionError
- from ConfigParser import RawConfigParser as ConfigParser
-else:
- from configparser import NoOptionError
- from configparser import RawConfigParser as ConfigParser
+from configparser import NoOptionError
+from configparser import RawConfigParser as ConfigParser
# It seems that some people are importing ConfigParser from here so is
# good to keep its class name. Use of RawConfigParser is needed in
# order to be able to load path names with percent in them, like
@@ -151,12 +177,11 @@ from distutils.util import get_platform
from numpy.distutils.exec_command import (
find_executable, filepath_from_subprocess_output,
- get_pythonexe)
+ )
from numpy.distutils.misc_util import (is_sequence, is_string,
get_shared_lib_extension)
from numpy.distutils.command.config import config as cmd_config
-from numpy.distutils.compat import get_exception
-from numpy.distutils import customized_ccompiler
+from numpy.distutils import customized_ccompiler as _customized_ccompiler
from numpy.distutils import _shell_utils
import distutils.ccompiler
import tempfile
@@ -169,6 +194,15 @@ _bits = {'32bit': 32, '64bit': 64}
platform_bits = _bits[platform.architecture()[0]]
+global_compiler = None
+
+def customized_ccompiler():
+ global global_compiler
+ if not global_compiler:
+ global_compiler = _customized_ccompiler()
+ return global_compiler
+
+
def _c_string_literal(s):
"""
Convert a python string into a literal suitable for inclusion into C code
@@ -250,32 +284,29 @@ if sys.platform == 'win32':
default_include_dirs.extend(
os.path.join(library_root, d) for d in _include_dirs)
- if sys.version_info >= (3, 3):
- # VCpkg is the de-facto package manager on windows for C/C++
- # libraries. If it is on the PATH, then we append its paths here.
- # We also don't re-implement shutil.which for Python 2.7 because
- # vcpkg doesn't support MSVC 2008.
- vcpkg = shutil.which('vcpkg')
- if vcpkg:
- vcpkg_dir = os.path.dirname(vcpkg)
- if platform.architecture() == '32bit':
- specifier = 'x86'
- else:
- specifier = 'x64'
-
- vcpkg_installed = os.path.join(vcpkg_dir, 'installed')
- for vcpkg_root in [
- os.path.join(vcpkg_installed, specifier + '-windows'),
- os.path.join(vcpkg_installed, specifier + '-windows-static'),
- ]:
- add_system_root(vcpkg_root)
-
- # Conda is another popular package manager that provides libraries
- conda = shutil.which('conda')
- if conda:
- conda_dir = os.path.dirname(conda)
- add_system_root(os.path.join(conda_dir, '..', 'Library'))
- add_system_root(os.path.join(conda_dir, 'Library'))
+ # VCpkg is the de-facto package manager on windows for C/C++
+ # libraries. If it is on the PATH, then we append its paths here.
+ vcpkg = shutil.which('vcpkg')
+ if vcpkg:
+ vcpkg_dir = os.path.dirname(vcpkg)
+        if platform.architecture()[0] == '32bit':
+ specifier = 'x86'
+ else:
+ specifier = 'x64'
+
+ vcpkg_installed = os.path.join(vcpkg_dir, 'installed')
+ for vcpkg_root in [
+ os.path.join(vcpkg_installed, specifier + '-windows'),
+ os.path.join(vcpkg_installed, specifier + '-windows-static'),
+ ]:
+ add_system_root(vcpkg_root)
+
+ # Conda is another popular package manager that provides libraries
+ conda = shutil.which('conda')
+ if conda:
+ conda_dir = os.path.dirname(conda)
+ add_system_root(os.path.join(conda_dir, '..', 'Library'))
+ add_system_root(os.path.join(conda_dir, 'Library'))
else:
default_lib_dirs = libpaths(['/usr/local/lib', '/opt/lib', '/usr/lib',
@@ -397,6 +428,10 @@ def get_info(name, notfound_action=0):
'lapack_mkl': lapack_mkl_info, # use lapack_opt instead
'blas_mkl': blas_mkl_info, # use blas_opt instead
'accelerate': accelerate_info, # use blas_opt instead
+ 'openblas64_': openblas64__info,
+ 'openblas64__lapack': openblas64__lapack_info,
+ 'openblas_ilp64': openblas_ilp64_info,
+ 'openblas_ilp64_lapack': openblas_ilp64_lapack_info,
'x11': x11_info,
'fft_opt': fft_opt_info,
'fftw': fftw_info,
@@ -419,7 +454,13 @@ def get_info(name, notfound_action=0):
'numarray': numarray_info,
'numerix': numerix_info,
'lapack_opt': lapack_opt_info,
+ 'lapack_ilp64_opt': lapack_ilp64_opt_info,
+ 'lapack_ilp64_plain_opt': lapack_ilp64_plain_opt_info,
+ 'lapack64__opt': lapack64__opt_info,
'blas_opt': blas_opt_info,
+ 'blas_ilp64_opt': blas_ilp64_opt_info,
+ 'blas_ilp64_plain_opt': blas_ilp64_plain_opt_info,
+ 'blas64__opt': blas64__opt_info,
'boost_python': boost_python_info,
'agg2': agg2_info,
'wx': wx_info,
@@ -485,6 +526,13 @@ class LapackSrcNotFoundError(LapackNotFoundError):
the LAPACK_SRC environment variable."""
+class LapackILP64NotFoundError(NotFoundError):
+ """
+ 64-bit Lapack libraries not found.
+ Known libraries in numpy/distutils/site.cfg file are:
+ openblas64_, openblas_ilp64
+ """
+
class BlasOptNotFoundError(NotFoundError):
"""
Optimized (vendor) Blas libraries are not found.
@@ -499,6 +547,12 @@ class BlasNotFoundError(NotFoundError):
numpy/distutils/site.cfg file (section [blas]) or by setting
the BLAS environment variable."""
+class BlasILP64NotFoundError(NotFoundError):
+ """
+ 64-bit Blas libraries not found.
+ Known libraries in numpy/distutils/site.cfg file are:
+ openblas64_, openblas_ilp64
+ """
class BlasSrcNotFoundError(BlasNotFoundError):
"""
@@ -542,14 +596,18 @@ class UmfpackNotFoundError(NotFoundError):
the UMFPACK environment variable."""
-class system_info(object):
+class system_info:
""" get_info() is the only public method. Don't use others.
"""
- section = 'ALL'
dir_env_var = None
- search_static_first = 0 # XXX: disabled by default, may disappear in
- # future unless it is proved to be useful.
+ # XXX: search_static_first is disabled by default, may disappear in
+ # future unless it is proved to be useful.
+ search_static_first = 0
+ # The base-class section name is a random word "ALL" and is not really
+ # intended for general use. It cannot be None nor can it be DEFAULT as
+ # these break the ConfigParser. See gh-15338
+ section = 'ALL'
saved_results = {}
notfounderror = NotFoundError
@@ -677,7 +735,7 @@ class system_info(object):
return info
def get_info(self, notfound_action=0):
- """ Return a dictonary with items that are compatible
+ """ Return a dictionary with items that are compatible
with numpy.distutils.setup keyword arguments.
"""
flag = 0
@@ -1580,7 +1638,7 @@ def get_atlas_version(**config):
log.info('Status: %d', s)
log.info('Output: %s', o)
- if atlas_version == '3.2.1_pre3.3.6':
+ elif atlas_version == '3.2.1_pre3.3.6':
dict_append(info, define_macros=[('NO_ATLAS_INFO', -2)])
else:
dict_append(info, define_macros=[(
@@ -1591,10 +1649,10 @@ def get_atlas_version(**config):
class lapack_opt_info(system_info):
-
notfounderror = LapackNotFoundError
- # Default order of LAPACK checks
+    # List of all known LAPACK libraries, in the default order
lapack_order = ['mkl', 'openblas', 'flame', 'atlas', 'accelerate', 'lapack']
+ order_env_var_name = 'NPY_LAPACK_ORDER'
def _calc_info_mkl(self):
info = get_info('lapack_mkl')
@@ -1686,8 +1744,11 @@ class lapack_opt_info(system_info):
return True
return False
+ def _calc_info(self, name):
+ return getattr(self, '_calc_info_{}'.format(name))()
+
def calc_info(self):
- user_order = os.environ.get('NPY_LAPACK_ORDER', None)
+ user_order = os.environ.get(self.order_env_var_name, None)
if user_order is None:
lapack_order = self.lapack_order
else:
@@ -1707,7 +1768,7 @@ class lapack_opt_info(system_info):
"values: {}".format(non_existing))
for lapack in lapack_order:
- if getattr(self, '_calc_info_{}'.format(lapack))():
+ if self._calc_info(lapack):
return
if 'lapack' not in lapack_order:
@@ -1717,11 +1778,53 @@ class lapack_opt_info(system_info):
warnings.warn(LapackSrcNotFoundError.__doc__ or '', stacklevel=2)
-class blas_opt_info(system_info):
+class _ilp64_opt_info_mixin:
+ symbol_suffix = None
+ symbol_prefix = None
+
+ def _check_info(self, info):
+ macros = dict(info.get('define_macros', []))
+ prefix = macros.get('BLAS_SYMBOL_PREFIX', '')
+ suffix = macros.get('BLAS_SYMBOL_SUFFIX', '')
+
+ if self.symbol_prefix not in (None, prefix):
+ return False
+
+ if self.symbol_suffix not in (None, suffix):
+ return False
+ return bool(info)
+
+
+class lapack_ilp64_opt_info(lapack_opt_info, _ilp64_opt_info_mixin):
+ notfounderror = LapackILP64NotFoundError
+ lapack_order = ['openblas64_', 'openblas_ilp64']
+ order_env_var_name = 'NPY_LAPACK_ILP64_ORDER'
+
+ def _calc_info(self, name):
+ info = get_info(name + '_lapack')
+ if self._check_info(info):
+ self.set_info(**info)
+ return True
+ return False
+
+
+class lapack_ilp64_plain_opt_info(lapack_ilp64_opt_info):
+ # Same as lapack_ilp64_opt_info, but fix symbol names
+ symbol_prefix = ''
+ symbol_suffix = ''
+
+
+class lapack64__opt_info(lapack_ilp64_opt_info):
+ symbol_prefix = ''
+ symbol_suffix = '64_'
+
+
+class blas_opt_info(system_info):
notfounderror = BlasNotFoundError
- # Default order of BLAS checks
+ # List of all known BLAS libraries, in the default order
blas_order = ['mkl', 'blis', 'openblas', 'atlas', 'accelerate', 'blas']
+ order_env_var_name = 'NPY_BLAS_ORDER'
def _calc_info_mkl(self):
info = get_info('blas_mkl')
@@ -1786,8 +1889,11 @@ class blas_opt_info(system_info):
self.set_info(**info)
return True
+ def _calc_info(self, name):
+ return getattr(self, '_calc_info_{}'.format(name))()
+
def calc_info(self):
- user_order = os.environ.get('NPY_BLAS_ORDER', None)
+ user_order = os.environ.get(self.order_env_var_name, None)
if user_order is None:
blas_order = self.blas_order
else:
@@ -1805,7 +1911,7 @@ class blas_opt_info(system_info):
raise ValueError("blas_opt_info user defined BLAS order has unacceptable values: {}".format(non_existing))
for blas in blas_order:
- if getattr(self, '_calc_info_{}'.format(blas))():
+ if self._calc_info(blas):
return
if 'blas' not in blas_order:
@@ -1815,6 +1921,29 @@ class blas_opt_info(system_info):
warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=2)
+class blas_ilp64_opt_info(blas_opt_info, _ilp64_opt_info_mixin):
+ notfounderror = BlasILP64NotFoundError
+ blas_order = ['openblas64_', 'openblas_ilp64']
+ order_env_var_name = 'NPY_BLAS_ILP64_ORDER'
+
+ def _calc_info(self, name):
+ info = get_info(name)
+ if self._check_info(info):
+ self.set_info(**info)
+ return True
+ return False
+
+
+class blas_ilp64_plain_opt_info(blas_ilp64_opt_info):
+ symbol_prefix = ''
+ symbol_suffix = ''
+
+
+class blas64__opt_info(blas_ilp64_opt_info):
+ symbol_prefix = ''
+ symbol_suffix = '64_'
+
+
class blas_info(system_info):
section = 'blas'
dir_env_var = 'BLAS'
@@ -1914,12 +2043,24 @@ class openblas_info(blas_info):
section = 'openblas'
dir_env_var = 'OPENBLAS'
_lib_names = ['openblas']
+ _require_symbols = []
notfounderror = BlasNotFoundError
- def check_embedded_lapack(self, info):
- return True
+ @property
+ def symbol_prefix(self):
+ try:
+ return self.cp.get(self.section, 'symbol_prefix')
+ except NoOptionError:
+ return ''
- def calc_info(self):
+ @property
+ def symbol_suffix(self):
+ try:
+ return self.cp.get(self.section, 'symbol_suffix')
+ except NoOptionError:
+ return ''
+
+ def _calc_info(self):
c = customized_ccompiler()
lib_dirs = self.get_lib_dirs()
@@ -1937,23 +2078,33 @@ class openblas_info(blas_info):
# Try gfortran-compatible library files
info = self.check_msvc_gfortran_libs(lib_dirs, openblas_libs)
# Skip lapack check, we'd need build_ext to do it
- assume_lapack = True
+ skip_symbol_check = True
elif info:
- assume_lapack = False
+ skip_symbol_check = False
info['language'] = 'c'
if info is None:
- return
+ return None
# Add extra info for OpenBLAS
extra_info = self.calc_extra_info()
dict_append(info, **extra_info)
- if not (assume_lapack or self.check_embedded_lapack(info)):
- return
+ if not (skip_symbol_check or self.check_symbols(info)):
+ return None
info['define_macros'] = [('HAVE_CBLAS', None)]
- self.set_info(**info)
+ if self.symbol_prefix:
+ info['define_macros'] += [('BLAS_SYMBOL_PREFIX', self.symbol_prefix)]
+ if self.symbol_suffix:
+ info['define_macros'] += [('BLAS_SYMBOL_SUFFIX', self.symbol_suffix)]
+
+ return info
+
+ def calc_info(self):
+ info = self._calc_info()
+ if info is not None:
+ self.set_info(**info)
def check_msvc_gfortran_libs(self, library_dirs, libraries):
# First, find the full path to each library directory
@@ -1969,16 +2120,17 @@ class openblas_info(blas_info):
return None
# Generate numpy.distutils virtual static library file
- tmpdir = os.path.join(os.getcwd(), 'build', 'openblas')
+ basename = self.__class__.__name__
+ tmpdir = os.path.join(os.getcwd(), 'build', basename)
if not os.path.isdir(tmpdir):
os.makedirs(tmpdir)
info = {'library_dirs': [tmpdir],
- 'libraries': ['openblas'],
+ 'libraries': [basename],
'language': 'f77'}
- fake_lib_file = os.path.join(tmpdir, 'openblas.fobjects')
- fake_clib_file = os.path.join(tmpdir, 'openblas.cobjects')
+ fake_lib_file = os.path.join(tmpdir, basename + '.fobjects')
+ fake_clib_file = os.path.join(tmpdir, basename + '.cobjects')
with open(fake_lib_file, 'w') as f:
f.write("\n".join(library_paths))
with open(fake_clib_file, 'w') as f:
@@ -1986,24 +2138,27 @@ class openblas_info(blas_info):
return info
-class openblas_lapack_info(openblas_info):
- section = 'openblas'
- dir_env_var = 'OPENBLAS'
- _lib_names = ['openblas']
- notfounderror = BlasNotFoundError
-
- def check_embedded_lapack(self, info):
+ def check_symbols(self, info):
res = False
c = customized_ccompiler()
tmpdir = tempfile.mkdtemp()
+
+ prototypes = "\n".join("void %s%s%s();" % (self.symbol_prefix,
+ symbol_name,
+ self.symbol_suffix)
+ for symbol_name in self._require_symbols)
+ calls = "\n".join("%s%s%s();" % (self.symbol_prefix,
+ symbol_name,
+ self.symbol_suffix)
+ for symbol_name in self._require_symbols)
s = textwrap.dedent("""\
- void zungqr_();
+ %(prototypes)s
int main(int argc, const char *argv[])
{
- zungqr_();
+ %(calls)s
return 0;
- }""")
+ }""") % dict(prototypes=prototypes, calls=calls)
src = os.path.join(tmpdir, 'source.c')
out = os.path.join(tmpdir, 'a.out')
# Add the additional "extra" arguments
@@ -2011,8 +2166,6 @@ class openblas_lapack_info(openblas_info):
extra_args = info['extra_link_args']
except Exception:
extra_args = []
- if sys.version_info < (3, 5) and sys.version_info > (3, 0) and c.compiler_type == "msvc":
- extra_args.append("/MANIFEST")
try:
with open(src, 'wt') as f:
f.write(s)
@@ -2028,9 +2181,49 @@ class openblas_lapack_info(openblas_info):
shutil.rmtree(tmpdir)
return res
+class openblas_lapack_info(openblas_info):
+ section = 'openblas'
+ dir_env_var = 'OPENBLAS'
+ _lib_names = ['openblas']
+ _require_symbols = ['zungqr_']
+ notfounderror = BlasNotFoundError
+
class openblas_clapack_info(openblas_lapack_info):
_lib_names = ['openblas', 'lapack']
+class openblas_ilp64_info(openblas_info):
+ section = 'openblas_ilp64'
+ dir_env_var = 'OPENBLAS_ILP64'
+ _lib_names = ['openblas64']
+ _require_symbols = ['dgemm_', 'cblas_dgemm']
+ notfounderror = BlasILP64NotFoundError
+
+ def _calc_info(self):
+ info = super()._calc_info()
+ if info is not None:
+ info['define_macros'] += [('HAVE_BLAS_ILP64', None)]
+ return info
+
+class openblas_ilp64_lapack_info(openblas_ilp64_info):
+ _require_symbols = ['dgemm_', 'cblas_dgemm', 'zungqr_', 'LAPACKE_zungqr']
+
+ def _calc_info(self):
+ info = super()._calc_info()
+ if info:
+ info['define_macros'] += [('HAVE_LAPACKE', None)]
+ return info
+
+class openblas64__info(openblas_ilp64_info):
+ # ILP64 OpenBLAS, with default symbol suffix
+ section = 'openblas64_'
+ dir_env_var = 'OPENBLAS64_'
+ _lib_names = ['openblas64_']
+ symbol_suffix = '64_'
+ symbol_prefix = ''
+
+class openblas64__lapack_info(openblas_ilp64_lapack_info, openblas64__info):
+ pass
+
class blis_info(blas_info):
section = 'blis'
dir_env_var = 'BLIS'
@@ -2364,18 +2557,18 @@ class numerix_info(system_info):
try:
import numpy # noqa: F401
which = "numpy", "defaulted"
- except ImportError:
- msg1 = str(get_exception())
+ except ImportError as e:
+ msg1 = str(e)
try:
import Numeric # noqa: F401
which = "numeric", "defaulted"
- except ImportError:
- msg2 = str(get_exception())
+ except ImportError as e:
+ msg2 = str(e)
try:
import numarray # noqa: F401
which = "numarray", "defaulted"
- except ImportError:
- msg3 = str(get_exception())
+ except ImportError as e:
+ msg3 = str(e)
log.info(msg1)
log.info(msg2)
log.info(msg3)
diff --git a/numpy/distutils/tests/test_exec_command.py b/numpy/distutils/tests/test_exec_command.py
index 37912f5ba..d6eb7d1c3 100644
--- a/numpy/distutils/tests/test_exec_command.py
+++ b/numpy/distutils/tests/test_exec_command.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import os
import sys
from tempfile import TemporaryFile
@@ -10,12 +8,9 @@ from numpy.testing import tempdir, assert_, assert_warns
# In python 3 stdout, stderr are text (unicode compliant) devices, so to
# emulate them import StringIO from the io module.
-if sys.version_info[0] >= 3:
- from io import StringIO
-else:
- from StringIO import StringIO
+from io import StringIO
-class redirect_stdout(object):
+class redirect_stdout:
"""Context manager to redirect stdout for exec_command test."""
def __init__(self, stdout=None):
self._stdout = stdout or sys.stdout
@@ -30,7 +25,7 @@ class redirect_stdout(object):
# note: closing sys.stdout won't close it.
self._stdout.close()
-class redirect_stderr(object):
+class redirect_stderr:
"""Context manager to redirect stderr for exec_command test."""
def __init__(self, stderr=None):
self._stderr = stderr or sys.stderr
@@ -45,7 +40,7 @@ class redirect_stderr(object):
# note: closing sys.stderr won't close it.
self._stderr.close()
-class emulate_nonposix(object):
+class emulate_nonposix:
"""Context manager to emulate os.name != 'posix' """
def __init__(self, osname='non-posix'):
self._new_name = osname
@@ -98,7 +93,7 @@ def test_exec_command_stderr():
exec_command.exec_command("cd '.'")
-class TestExecCommand(object):
+class TestExecCommand:
def setup(self):
self.pyexe = get_pythonexe()
@@ -191,9 +186,8 @@ class TestExecCommand(object):
with tempdir() as tmpdir:
fn = "file"
tmpfile = os.path.join(tmpdir, fn)
- f = open(tmpfile, 'w')
- f.write('Hello')
- f.close()
+ with open(tmpfile, 'w') as f:
+ f.write('Hello')
s, o = exec_command.exec_command(
'"%s" -c "f = open(\'%s\', \'r\'); f.close()"' %
diff --git a/numpy/distutils/tests/test_fcompiler.py b/numpy/distutils/tests/test_fcompiler.py
index 6d245fbd4..dd97f1e72 100644
--- a/numpy/distutils/tests/test_fcompiler.py
+++ b/numpy/distutils/tests/test_fcompiler.py
@@ -1,8 +1,4 @@
-from __future__ import division, absolute_import, print_function
-
-import pytest
-
-from numpy.testing import assert_, suppress_warnings
+from numpy.testing import assert_
import numpy.distutils.fcompiler
customizable_flags = [
diff --git a/numpy/distutils/tests/test_fcompiler_gnu.py b/numpy/distutils/tests/test_fcompiler_gnu.py
index 49208aace..0817ae58c 100644
--- a/numpy/distutils/tests/test_fcompiler_gnu.py
+++ b/numpy/distutils/tests/test_fcompiler_gnu.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
from numpy.testing import assert_
import numpy.distutils.fcompiler
@@ -30,7 +28,7 @@ gfortran_version_strings = [
('GNU Fortran (crosstool-NG 8a21ab48) 7.2.0', '7.2.0')
]
-class TestG77Versions(object):
+class TestG77Versions:
def test_g77_version(self):
fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu')
for vs, version in g77_version_strings:
@@ -43,7 +41,7 @@ class TestG77Versions(object):
v = fc.version_match(vs)
assert_(v is None, (vs, v))
-class TestGFortranVersions(object):
+class TestGFortranVersions:
def test_gfortran_version(self):
fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95')
for vs, version in gfortran_version_strings:
diff --git a/numpy/distutils/tests/test_fcompiler_intel.py b/numpy/distutils/tests/test_fcompiler_intel.py
index 5e014bada..45c9cdac1 100644
--- a/numpy/distutils/tests/test_fcompiler_intel.py
+++ b/numpy/distutils/tests/test_fcompiler_intel.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import numpy.distutils.fcompiler
from numpy.testing import assert_
@@ -16,7 +14,7 @@ intel_64bit_version_strings = [
"running on Intel(R) 64, Version 11.1", '11.1')
]
-class TestIntelFCompilerVersions(object):
+class TestIntelFCompilerVersions:
def test_32bit_version(self):
fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intel')
for vs, version in intel_32bit_version_strings:
@@ -24,7 +22,7 @@ class TestIntelFCompilerVersions(object):
assert_(v == version)
-class TestIntelEM64TFCompilerVersions(object):
+class TestIntelEM64TFCompilerVersions:
def test_64bit_version(self):
fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intelem')
for vs, version in intel_64bit_version_strings:
diff --git a/numpy/distutils/tests/test_fcompiler_nagfor.py b/numpy/distutils/tests/test_fcompiler_nagfor.py
index 1c936056a..2e04f5266 100644
--- a/numpy/distutils/tests/test_fcompiler_nagfor.py
+++ b/numpy/distutils/tests/test_fcompiler_nagfor.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
from numpy.testing import assert_
import numpy.distutils.fcompiler
@@ -16,7 +14,7 @@ nag_version_strings = [('nagfor', 'NAG Fortran Compiler Release '
'431,435,437,446,459-460,463,472,494,496,503,508,'
'511,517,529,555,557,565)', '5.1')]
-class TestNagFCompilerVersions(object):
+class TestNagFCompilerVersions:
def test_version_match(self):
for comp, vs, version in nag_version_strings:
fc = numpy.distutils.fcompiler.new_fcompiler(compiler=comp)
diff --git a/numpy/distutils/tests/test_mingw32ccompiler.py b/numpy/distutils/tests/test_mingw32ccompiler.py
new file mode 100644
index 000000000..ebedacb32
--- /dev/null
+++ b/numpy/distutils/tests/test_mingw32ccompiler.py
@@ -0,0 +1,42 @@
+import shutil
+import subprocess
+import sys
+import pytest
+
+from numpy.distutils import mingw32ccompiler
+
+
+@pytest.mark.skipif(sys.platform != 'win32', reason='win32 only test')
+def test_build_import():
+ '''Test the mingw32ccompiler.build_import_library, which builds a
+ `python.a` from the MSVC `python.lib`
+ '''
+
+ # make sure `nm.exe` exists and supports the current python version. This
+ # can get mixed up when the PATH has a 64-bit nm but the python is 32-bit
+ try:
+ out = subprocess.check_output(['nm.exe', '--help'])
+ except FileNotFoundError:
+ pytest.skip("'nm.exe' not on path, is mingw installed?")
+ supported = out[out.find(b'supported targets:'):]
+ if sys.maxsize < 2**32:
+ if b'pe-i386' not in supported:
+ raise ValueError("'nm.exe' found but it does not support 32-bit "
+ "dlls when using 32-bit python. Supported "
+ "formats: '%s'" % supported)
+ elif b'pe-x86-64' not in supported:
+ raise ValueError("'nm.exe' found but it does not support 64-bit "
+ "dlls when using 64-bit python. Supported "
+ "formats: '%s'" % supported)
+ # Hide the import library to force a build
+ has_import_lib, fullpath = mingw32ccompiler._check_for_import_lib()
+ if has_import_lib:
+ shutil.move(fullpath, fullpath + '.bak')
+
+ try:
+ # Whew, now we can actually test the function
+ mingw32ccompiler.build_import_library()
+
+ finally:
+ if has_import_lib:
+ shutil.move(fullpath + '.bak', fullpath)
diff --git a/numpy/distutils/tests/test_misc_util.py b/numpy/distutils/tests/test_misc_util.py
index 3e239cf48..605c80483 100644
--- a/numpy/distutils/tests/test_misc_util.py
+++ b/numpy/distutils/tests/test_misc_util.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
from os.path import join, sep, dirname
from numpy.distutils.misc_util import (
@@ -11,7 +9,7 @@ from numpy.testing import (
ajoin = lambda *paths: join(*((sep,)+paths))
-class TestAppendpath(object):
+class TestAppendpath:
def test_1(self):
assert_equal(appendpath('prefix', 'name'), join('prefix', 'name'))
@@ -35,7 +33,7 @@ class TestAppendpath(object):
assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sub/sup/name'),
ajoin('prefix', 'sub', 'sub2', 'sup', 'name'))
-class TestMinrelpath(object):
+class TestMinrelpath:
def test_1(self):
n = lambda path: path.replace('/', sep)
@@ -49,7 +47,7 @@ class TestMinrelpath(object):
assert_equal(minrelpath(n('.././..')), n('../..'))
assert_equal(minrelpath(n('aa/bb/.././../dd')), n('dd'))
-class TestGpaths(object):
+class TestGpaths:
def test_gpaths(self):
local_path = minrelpath(join(dirname(__file__), '..'))
@@ -58,7 +56,7 @@ class TestGpaths(object):
f = gpaths('system_info.py', local_path)
assert_(join(local_path, 'system_info.py') == f[0], repr(f))
-class TestSharedExtension(object):
+class TestSharedExtension:
def test_get_shared_lib_extension(self):
import sys
diff --git a/numpy/distutils/tests/test_npy_pkg_config.py b/numpy/distutils/tests/test_npy_pkg_config.py
index 537e16e90..b287ebe2e 100644
--- a/numpy/distutils/tests/test_npy_pkg_config.py
+++ b/numpy/distutils/tests/test_npy_pkg_config.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import os
from numpy.distutils.npy_pkg_config import read_config, parse_flags
@@ -36,7 +34,7 @@ libs = -L${libdir}
simple_variable_d = {'cflags': '-I/foo/bar/include', 'libflags': '-L/foo/bar/lib',
'version': '0.1', 'name': 'foo'}
-class TestLibraryInfo(object):
+class TestLibraryInfo:
def test_simple(self):
with temppath('foo.ini') as path:
with open(path, 'w') as f:
@@ -63,7 +61,7 @@ class TestLibraryInfo(object):
out.vars['prefix'] = '/Users/david'
assert_(out.cflags() == '-I/Users/david/include')
-class TestParseFlags(object):
+class TestParseFlags:
def test_simple_cflags(self):
d = parse_flags("-I/usr/include")
assert_(d['include_dirs'] == ['/usr/include'])
diff --git a/numpy/distutils/tests/test_shell_utils.py b/numpy/distutils/tests/test_shell_utils.py
index a0344244f..32bd283e5 100644
--- a/numpy/distutils/tests/test_shell_utils.py
+++ b/numpy/distutils/tests/test_shell_utils.py
@@ -1,8 +1,5 @@
-from __future__ import division, absolute_import, print_function
-
import pytest
import subprocess
-import os
import json
import sys
diff --git a/numpy/distutils/tests/test_system_info.py b/numpy/distutils/tests/test_system_info.py
index 3c7638960..0768ffdde 100644
--- a/numpy/distutils/tests/test_system_info.py
+++ b/numpy/distutils/tests/test_system_info.py
@@ -1,5 +1,3 @@
-from __future__ import division, print_function
-
import os
import shutil
import pytest
@@ -9,7 +7,7 @@ from distutils.errors import DistutilsError
from numpy.testing import assert_, assert_equal, assert_raises
from numpy.distutils import ccompiler, customized_ccompiler
-from numpy.distutils.system_info import system_info, ConfigParser
+from numpy.distutils.system_info import system_info, ConfigParser, mkl_info
from numpy.distutils.system_info import AliasedOptionError
from numpy.distutils.system_info import default_lib_dirs, default_include_dirs
from numpy.distutils import _shell_utils
@@ -130,7 +128,7 @@ class DuplicateOptionInfo(_system_info):
section = 'duplicate_options'
-class TestSystemInfoReading(object):
+class TestSystemInfoReading:
def setup(self):
""" Create the libraries """
@@ -255,3 +253,35 @@ class TestSystemInfoReading(object):
assert_(os.path.isfile(self._src2.replace('.c', '.o')))
finally:
os.chdir(previousDir)
+
+ def test_overrides(self):
+ previousDir = os.getcwd()
+ cfg = os.path.join(self._dir1, 'site.cfg')
+ shutil.copy(self._sitecfg, cfg)
+ try:
+ os.chdir(self._dir1)
+ # Check that the '[ALL]' section does not override
+ # missing values from other sections
+ info = mkl_info()
+ lib_dirs = info.cp['ALL']['library_dirs'].split(os.pathsep)
+ assert info.get_lib_dirs() != lib_dirs
+
+ # But if we copy the values to a '[mkl]' section the value
+ # is correct
+ with open(cfg, 'r') as fid:
+ mkl = fid.read().replace('ALL', 'mkl')
+ with open(cfg, 'w') as fid:
+ fid.write(mkl)
+ info = mkl_info()
+ assert info.get_lib_dirs() == lib_dirs
+
+ # Also, the values will be taken from a section named '[DEFAULT]'
+ with open(cfg, 'r') as fid:
+ dflt = fid.read().replace('mkl', 'DEFAULT')
+ with open(cfg, 'w') as fid:
+ fid.write(dflt)
+ info = mkl_info()
+ assert info.get_lib_dirs() == lib_dirs
+ finally:
+ os.chdir(previousDir)
+
diff --git a/numpy/distutils/unixccompiler.py b/numpy/distutils/unixccompiler.py
index 11b2cce52..5f36c439f 100644
--- a/numpy/distutils/unixccompiler.py
+++ b/numpy/distutils/unixccompiler.py
@@ -2,20 +2,13 @@
unixccompiler - can handle very long argument lists for ar.
"""
-from __future__ import division, absolute_import, print_function
-
import os
-from distutils.errors import DistutilsExecError, CompileError
-from distutils.unixccompiler import *
+from distutils.errors import CompileError, DistutilsExecError, LibError
+from distutils.unixccompiler import UnixCCompiler
from numpy.distutils.ccompiler import replace_method
-from numpy.distutils.compat import get_exception
from numpy.distutils.misc_util import _commandline_dep_string
-
-if sys.version_info[0] < 3:
- from . import log
-else:
- from numpy.distutils import log
+from numpy.distutils import log
# Note that UnixCCompiler._compile appeared in Python 2.3
def UnixCCompiler__compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
@@ -56,8 +49,8 @@ def UnixCCompiler__compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts
try:
self.spawn(self.compiler_so + cc_args + [src, '-o', obj] + deps +
extra_postargs, display = display)
- except DistutilsExecError:
- msg = str(get_exception())
+ except DistutilsExecError as e:
+ msg = str(e)
raise CompileError(msg)
# add commandline flags to dependency file
@@ -128,8 +121,8 @@ def UnixCCompiler_create_static_lib(self, objects, output_libname,
try:
self.spawn(self.ranlib + [output_filename],
display = display)
- except DistutilsExecError:
- msg = str(get_exception())
+ except DistutilsExecError as e:
+ msg = str(e)
raise LibError(msg)
else:
log.debug("skipping %s (up-to-date)", output_filename)
diff --git a/numpy/doc/__init__.py b/numpy/doc/__init__.py
index b6f1fa71c..8a944fecd 100644
--- a/numpy/doc/__init__.py
+++ b/numpy/doc/__init__.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import os
ref_dir = os.path.join(os.path.dirname(__file__))
diff --git a/numpy/doc/basics.py b/numpy/doc/basics.py
index 1871512bf..635c1b1b8 100644
--- a/numpy/doc/basics.py
+++ b/numpy/doc/basics.py
@@ -18,7 +18,7 @@ The primitive types supported are tied closely to those in C:
- C type
- Description
- * - `np.bool`
+ * - `np.bool_`
- ``bool``
- Boolean (True or False) stored as a byte
@@ -283,7 +283,7 @@ NumPy provides `numpy.iinfo` and `numpy.finfo` to verify the
minimum or maximum values of NumPy integer and floating point values
respectively ::
- >>> np.iinfo(np.int) # Bounds of the default integer on this system.
+ >>> np.iinfo(int) # Bounds of the default integer on this system.
iinfo(min=-9223372036854775808, max=9223372036854775807, dtype=int64)
>>> np.iinfo(np.int32) # Bounds of a 32-bit integer
iinfo(min=-2147483648, max=2147483647, dtype=int32)
@@ -339,4 +339,3 @@ be useful to test your code with the value
``1 + np.finfo(np.longdouble).eps``.
"""
-from __future__ import division, absolute_import, print_function
diff --git a/numpy/doc/broadcasting.py b/numpy/doc/broadcasting.py
index cb548a0d0..63975e6a9 100644
--- a/numpy/doc/broadcasting.py
+++ b/numpy/doc/broadcasting.py
@@ -178,4 +178,3 @@ making it a two-dimensional ``4x1`` array. Combining the ``4x1`` array
with ``b``, which has shape ``(3,)``, yields a ``4x3`` array.
"""
-from __future__ import division, absolute_import, print_function
diff --git a/numpy/doc/byteswapping.py b/numpy/doc/byteswapping.py
index 7a749c8d5..fe9461977 100644
--- a/numpy/doc/byteswapping.py
+++ b/numpy/doc/byteswapping.py
@@ -153,4 +153,3 @@ can be achieved with the ndarray astype method:
False
"""
-from __future__ import division, absolute_import, print_function
diff --git a/numpy/doc/constants.py b/numpy/doc/constants.py
index 72793e44d..2c629ad33 100644
--- a/numpy/doc/constants.py
+++ b/numpy/doc/constants.py
@@ -13,9 +13,8 @@ NumPy includes several constants:
#
# Note: the docstring is autogenerated.
#
-from __future__ import division, absolute_import, print_function
-
-import textwrap, re
+import re
+import textwrap
# Maintain same format as in numpy.add_newdocs
constants = []
diff --git a/numpy/doc/creation.py b/numpy/doc/creation.py
index 9ebe938be..067f8bb33 100644
--- a/numpy/doc/creation.py
+++ b/numpy/doc/creation.py
@@ -141,4 +141,3 @@ random values, and some utility functions to generate special matrices (e.g.
diagonal).
"""
-from __future__ import division, absolute_import, print_function
diff --git a/numpy/doc/dispatch.py b/numpy/doc/dispatch.py
index c9029941b..ba76a43ae 100644
--- a/numpy/doc/dispatch.py
+++ b/numpy/doc/dispatch.py
@@ -58,7 +58,7 @@ numpy.ndarray
How can we pass our custom array type through this function? Numpy allows a
class to indicate that it would like to handle computations in a custom-defined
-way through the interaces ``__array_ufunc__`` and ``__array_function__``. Let's
+way through the interfaces ``__array_ufunc__`` and ``__array_function__``. Let's
take one at a time, starting with ``_array_ufunc__``. This method covers
:ref:`ufuncs`, a class of functions that includes, for example,
:func:`numpy.multiply` and :func:`numpy.sin`.
diff --git a/numpy/doc/glossary.py b/numpy/doc/glossary.py
index 7d1c9a1d5..6d2e0010f 100644
--- a/numpy/doc/glossary.py
+++ b/numpy/doc/glossary.py
@@ -182,7 +182,7 @@ Glossary
instance
A class definition gives the blueprint for constructing an object::
- >>> class House(object):
+ >>> class House:
... wall_colour = 'white'
Yet, we have to *build* a house before it exists::
@@ -336,7 +336,7 @@ Glossary
Often seen in method signatures, ``self`` refers to the instance
of the associated class. For example:
- >>> class Paintbrush(object):
+ >>> class Paintbrush:
... color = 'blue'
...
... def paint(self):
@@ -473,4 +473,3 @@ Glossary
and f2py (which wraps Fortran).
"""
-from __future__ import division, absolute_import, print_function
diff --git a/numpy/doc/indexing.py b/numpy/doc/indexing.py
index 676015668..6b15a7a9e 100644
--- a/numpy/doc/indexing.py
+++ b/numpy/doc/indexing.py
@@ -372,8 +372,7 @@ exceptions (assigning complex to floats or ints): ::
>>> x[1]
1
>>> x[1] = 1.2j
- <type 'exceptions.TypeError'>: can't convert complex to long; use
- long(abs(z))
+ TypeError: can't convert complex to int
Unlike some of the references (such as array and mask indices)
@@ -446,4 +445,3 @@ converted to an array as a list would be. As an example: ::
40
"""
-from __future__ import division, absolute_import, print_function
diff --git a/numpy/doc/internals.py b/numpy/doc/internals.py
index a14fee7c2..6718f1108 100644
--- a/numpy/doc/internals.py
+++ b/numpy/doc/internals.py
@@ -160,4 +160,3 @@ when accessing elements of an array. Granted, it goes against the grain, but
it is more in line with Python semantics and the natural order of the data.
"""
-from __future__ import division, absolute_import, print_function
diff --git a/numpy/doc/misc.py b/numpy/doc/misc.py
index a76abe164..fc1c4cd01 100644
--- a/numpy/doc/misc.py
+++ b/numpy/doc/misc.py
@@ -224,4 +224,3 @@ Interfacing to C++:
5) SIP (used mainly in PyQT)
"""
-from __future__ import division, absolute_import, print_function
diff --git a/numpy/doc/structured_arrays.py b/numpy/doc/structured_arrays.py
index 1343d2adc..72990cf89 100644
--- a/numpy/doc/structured_arrays.py
+++ b/numpy/doc/structured_arrays.py
@@ -644,4 +644,3 @@ attribute takes precedence. Such fields will be inaccessible by attribute but
will still be accessible by index.
"""
-from __future__ import division, absolute_import, print_function
diff --git a/numpy/doc/subclassing.py b/numpy/doc/subclassing.py
index d0685328e..5a54ddd90 100644
--- a/numpy/doc/subclassing.py
+++ b/numpy/doc/subclassing.py
@@ -114,7 +114,7 @@ For example, consider the following Python code:
.. testcode::
- class C(object):
+ class C:
def __new__(cls, *args):
print('Cls in __new__:', cls)
print('Args in __new__:', args)
@@ -454,7 +454,7 @@ following.
input numpy as np
class A(np.ndarray):
- def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+ def __array_ufunc__(self, ufunc, method, *inputs, out=None, **kwargs):
args = []
in_no = []
for i, input_ in enumerate(inputs):
@@ -464,7 +464,7 @@ following.
else:
args.append(input_)
- outputs = kwargs.pop('out', None)
+ outputs = out
out_no = []
if outputs:
out_args = []
@@ -750,4 +750,3 @@ This object is now compatible with ``np.sum`` again because any extraneous argum
``**unused_kwargs`` parameter.
"""
-from __future__ import division, absolute_import, print_function
diff --git a/numpy/doc/ufuncs.py b/numpy/doc/ufuncs.py
index df2c455ec..eecc15083 100644
--- a/numpy/doc/ufuncs.py
+++ b/numpy/doc/ufuncs.py
@@ -135,4 +135,3 @@ results in an error. There are two alternatives:
a convenient way to apply these operators.
"""
-from __future__ import division, absolute_import, print_function
diff --git a/numpy/dual.py b/numpy/dual.py
index 651e845bb..92afec52d 100644
--- a/numpy/dual.py
+++ b/numpy/dual.py
@@ -10,8 +10,6 @@ NumPy.
.. _Scipy : https://www.scipy.org
"""
-from __future__ import division, absolute_import, print_function
-
# This module should be used for functions both in numpy and scipy if
# you want to use the numpy version if available but the scipy version
# otherwise.
diff --git a/numpy/f2py/__init__.py b/numpy/f2py/__init__.py
index 42e3632fd..949bac0ff 100644
--- a/numpy/f2py/__init__.py
+++ b/numpy/f2py/__init__.py
@@ -1,17 +1,13 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
"""Fortran to Python Interface Generator.
"""
-from __future__ import division, absolute_import, print_function
-
__all__ = ['run_main', 'compile', 'f2py_testing']
import sys
import subprocess
import os
-import numpy as np
-
from . import f2py2e
from . import f2py_testing
from . import diagnose
@@ -89,7 +85,7 @@ def compile(source,
args = ['-c', '-m', modulename, f.name]
- if isinstance(extra_args, np.compat.basestring):
+ if isinstance(extra_args, str):
is_posix = (os.name == 'posix')
extra_args = shlex.split(extra_args, posix=is_posix)
diff --git a/numpy/f2py/__main__.py b/numpy/f2py/__main__.py
index 708f7f362..c6115070e 100644
--- a/numpy/f2py/__main__.py
+++ b/numpy/f2py/__main__.py
@@ -1,6 +1,4 @@
# See http://cens.ioc.ee/projects/f2py2e/
-from __future__ import division, print_function
-
from numpy.f2py.f2py2e import main
main()
diff --git a/numpy/f2py/__version__.py b/numpy/f2py/__version__.py
index 49a2199bf..104c2e1a8 100644
--- a/numpy/f2py/__version__.py
+++ b/numpy/f2py/__version__.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
major = 2
try:
diff --git a/numpy/f2py/auxfuncs.py b/numpy/f2py/auxfuncs.py
index 404bdbd2d..80b150655 100644
--- a/numpy/f2py/auxfuncs.py
+++ b/numpy/f2py/auxfuncs.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
"""
Auxiliary functions for f2py2e.
@@ -14,8 +14,6 @@ $Date: 2005/07/24 19:01:55 $
Pearu Peterson
"""
-from __future__ import division, absolute_import, print_function
-
import pprint
import sys
import types
@@ -552,7 +550,7 @@ class F2PYError(Exception):
pass
-class throw_error(object):
+class throw_error:
def __init__(self, mess):
self.mess = mess
diff --git a/numpy/f2py/capi_maps.py b/numpy/f2py/capi_maps.py
index c41dd77c6..fabbfc4c2 100644
--- a/numpy/f2py/capi_maps.py
+++ b/numpy/f2py/capi_maps.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
"""
Copyright 1999,2000 Pearu Peterson all rights reserved,
@@ -11,8 +11,6 @@ $Date: 2005/05/06 10:57:33 $
Pearu Peterson
"""
-from __future__ import division, absolute_import, print_function
-
__version__ = "$Revision: 1.60 $"[10:-1]
from . import __version__
@@ -21,11 +19,10 @@ f2py_version = __version__.version
import copy
import re
import os
-import sys
from .crackfortran import markoutercomma
from . import cb_rules
-# The eviroment provided by auxfuncs.py is needed for some calls to eval.
+# The environment provided by auxfuncs.py is needed for some calls to eval.
# As the needed functions cannot be determined by static inspection of the
# code, it is safest to use import * pending a major refactoring of f2py.
from .auxfuncs import *
@@ -79,7 +76,7 @@ c2capi_map = {'double': 'NPY_DOUBLE',
'complex_long_double': 'NPY_CDOUBLE', # forced casting
'string': 'NPY_STRING'}
-# These new maps aren't used anyhere yet, but should be by default
+# These new maps aren't used anywhere yet, but should be by default
# unless building numeric or numarray extensions.
if using_newcore:
c2capi_map = {'double': 'NPY_DOUBLE',
@@ -149,11 +146,7 @@ c2buildvalue_map = {'double': 'd',
'complex_float': 'N',
'complex_double': 'N',
'complex_long_double': 'N',
- 'string': 'z'}
-
-if sys.version_info[0] >= 3:
- # Bytes, not Unicode strings
- c2buildvalue_map['string'] = 'y'
+ 'string': 'y'}
if using_newcore:
# c2buildvalue_map=???
@@ -179,17 +172,29 @@ f2cmap_all = {'real': {'': 'float', '4': 'float', '8': 'double',
'character': {'': 'string'}
}
-if os.path.isfile('.f2py_f2cmap'):
+f2cmap_default = copy.deepcopy(f2cmap_all)
+
+
+def load_f2cmap_file(f2cmap_file):
+ global f2cmap_all
+
+ f2cmap_all = copy.deepcopy(f2cmap_default)
+
+ if f2cmap_file is None:
+ # Default value
+ f2cmap_file = '.f2py_f2cmap'
+ if not os.path.isfile(f2cmap_file):
+ return
+
# User defined additions to f2cmap_all.
- # .f2py_f2cmap must contain a dictionary of dictionaries, only. For
+ # f2cmap_file must contain a dictionary of dictionaries, only. For
# example, {'real':{'low':'float'}} means that Fortran 'real(low)' is
# interpreted as C 'float'. This feature is useful for F90/95 users if
# they use PARAMETERSs in type specifications.
try:
- outmess('Reading .f2py_f2cmap ...\n')
- f = open('.f2py_f2cmap', 'r')
- d = eval(f.read(), {}, {})
- f.close()
+ outmess('Reading f2cmap from {!r} ...\n'.format(f2cmap_file))
+ with open(f2cmap_file, 'r') as f:
+ d = eval(f.read(), {}, {})
for k, d1 in list(d.items()):
for k1 in list(d1.keys()):
d1[k1.lower()] = d1[k1]
@@ -208,10 +213,10 @@ if os.path.isfile('.f2py_f2cmap'):
else:
errmess("\tIgnoring map {'%s':{'%s':'%s'}}: '%s' must be in %s\n" % (
k, k1, d[k][k1], d[k][k1], list(c2py_map.keys())))
- outmess('Successfully applied user defined changes from .f2py_f2cmap\n')
+ outmess('Successfully applied user defined f2cmap changes\n')
except Exception as msg:
errmess(
- 'Failed to apply user defined changes from .f2py_f2cmap: %s. Skipping.\n' % (msg))
+ 'Failed to apply user defined f2cmap changes: %s. Skipping.\n' % (msg))
cformat_map = {'double': '%g',
'float': '%g',
@@ -313,7 +318,6 @@ def getstrlength(var):
def getarrdims(a, var, verbose=0):
- global depargs
ret = {}
if isstring(var) and not isarray(var):
ret['dims'] = getstrlength(var)
@@ -509,7 +513,6 @@ def sign2map(a, var):
varrfromat
intent
"""
- global lcb_map, cb_map
out_a = a
if isintent_out(var):
for k in var['intent']:
diff --git a/numpy/f2py/cb_rules.py b/numpy/f2py/cb_rules.py
index 183d7c2f9..87887c152 100644
--- a/numpy/f2py/cb_rules.py
+++ b/numpy/f2py/cb_rules.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
"""
Build call-back mechanism for f2py2e.
@@ -13,8 +13,6 @@ $Date: 2005/07/20 11:27:58 $
Pearu Peterson
"""
-from __future__ import division, absolute_import, print_function
-
from . import __version__
from .auxfuncs import (
applyrules, debugcapi, dictappend, errmess, getargs, hasnote, isarray,
@@ -438,7 +436,6 @@ cb_map = {}
def buildcallbacks(m):
- global cb_map
cb_map[m['name']] = []
for bi in m['body']:
if bi['block'] == 'interface':
@@ -450,7 +447,6 @@ def buildcallbacks(m):
def buildcallback(rout, um):
- global cb_map
from . import capi_maps
outmess('\tConstructing call-back function "cb_%s_in_%s"\n' %
diff --git a/numpy/f2py/cfuncs.py b/numpy/f2py/cfuncs.py
index ccb7b3a32..f1ac214d4 100644
--- a/numpy/f2py/cfuncs.py
+++ b/numpy/f2py/cfuncs.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
"""
C declarations, CPP macros, and C functions for f2py2e.
@@ -14,8 +14,6 @@ $Date: 2005/05/06 11:42:34 $
Pearu Peterson
"""
-from __future__ import division, absolute_import, print_function
-
import sys
import copy
@@ -542,7 +540,7 @@ cppmacros[
'ARRSIZE'] = '#define ARRSIZE(dims,rank) (_PyArray_multiply_list(dims,rank))'
cppmacros['OLDPYNUM'] = """\
#ifdef OLDPYNUM
-#error You need to install NumPy version 13 or higher. See https://scipy.org/install.html
+#error You need to install NumPy version 0.13 or higher. See https://scipy.org/install.html
#endif
"""
################# C functions ###############
@@ -646,7 +644,6 @@ fprintf(stderr,\"string_from_pyobj(str='%s',len=%d,inistr='%s',obj=%p)\\n\",(cha
tmp = obj;
Py_INCREF(tmp);
}
-#if PY_VERSION_HEX >= 0x03000000
else if (PyUnicode_Check(obj)) {
tmp = PyUnicode_AsASCIIString(obj);
}
@@ -661,11 +658,6 @@ fprintf(stderr,\"string_from_pyobj(str='%s',len=%d,inistr='%s',obj=%p)\\n\",(cha
tmp = NULL;
}
}
-#else
- else {
- tmp = PyObject_Str(obj);
- }
-#endif
if (tmp == NULL) goto capi_fail;
if (*len == -1)
*len = PyString_GET_SIZE(tmp);
@@ -1041,6 +1033,8 @@ cfuncs[
'try_pyarr_from_complex_double'] = 'static int try_pyarr_from_complex_double(PyObject* obj,complex_double* v) {\n TRYCOMPLEXPYARRAYTEMPLATE(double,\'D\');\n}\n'
needs['create_cb_arglist'] = ['CFUNCSMESS', 'PRINTPYOBJERR', 'MINMAX']
+
+# create the list of arguments to be used when calling back to python
cfuncs['create_cb_arglist'] = """\
static int create_cb_arglist(PyObject* fun,PyTupleObject* xa,const int maxnofargs,const int nofoptargs,int *nofargs,PyTupleObject **args,const char *errmess) {
PyObject *tmp = NULL;
@@ -1066,6 +1060,10 @@ static int create_cb_arglist(PyObject* fun,PyTupleObject* xa,const int maxnofarg
tmp_fun = fun; /* built-in function */
Py_INCREF(tmp_fun);
tot = maxnofargs;
+ if (PyCFunction_Check(fun)) {
+ /* In case the function has a co_argcount (like on PyPy) */
+ di = 0;
+ }
if (xa != NULL)
tot += PyTuple_Size((PyObject *)xa);
}
@@ -1094,13 +1092,8 @@ if (tmp_fun==NULL) {
fprintf(stderr,\"Call-back argument must be function|instance|instance.__call__|f2py-function but got %s.\\n\",(fun==NULL?\"NULL\":Py_TYPE(fun)->tp_name));
goto capi_fail;
}
-#if PY_VERSION_HEX >= 0x03000000
if (PyObject_HasAttrString(tmp_fun,\"__code__\")) {
if (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"__code__\"),\"co_argcount\")) {
-#else
- if (PyObject_HasAttrString(tmp_fun,\"func_code\")) {
- if (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"func_code\"),\"co_argcount\")) {
-#endif
PyObject *tmp_argcount = PyObject_GetAttrString(tmp,\"co_argcount\");
Py_DECREF(tmp);
if (tmp_argcount == NULL) {
@@ -1111,13 +1104,8 @@ goto capi_fail;
}
}
/* Get the number of optional arguments */
-#if PY_VERSION_HEX >= 0x03000000
if (PyObject_HasAttrString(tmp_fun,\"__defaults__\")) {
if (PyTuple_Check(tmp = PyObject_GetAttrString(tmp_fun,\"__defaults__\")))
-#else
- if (PyObject_HasAttrString(tmp_fun,\"func_defaults\")) {
- if (PyTuple_Check(tmp = PyObject_GetAttrString(tmp_fun,\"func_defaults\")))
-#endif
opt = PyTuple_Size(tmp);
Py_XDECREF(tmp);
}
@@ -1174,7 +1162,7 @@ def buildcfuncs():
############ Auxiliary functions for sorting needs ###################
def append_needs(need, flag=1):
- global outneeds, needs
+ # This function modifies the contents of the global `outneeds` dict.
if isinstance(need, list):
for n in need:
append_needs(n, flag)
@@ -1241,7 +1229,7 @@ def append_needs(need, flag=1):
def get_needs():
- global outneeds, needs
+ # This function modifies the contents of the global `outneeds` dict.
res = {}
for n in outneeds.keys():
out = []
diff --git a/numpy/f2py/common_rules.py b/numpy/f2py/common_rules.py
index f61d8810a..90483e55b 100644
--- a/numpy/f2py/common_rules.py
+++ b/numpy/f2py/common_rules.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
"""
Build common block mechanism for f2py2e.
@@ -13,8 +13,6 @@ $Date: 2005/05/06 10:57:33 $
Pearu Peterson
"""
-from __future__ import division, absolute_import, print_function
-
__version__ = "$Revision: 1.19 $"[10:-1]
from . import __version__
diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py
index 2aaf5d7c6..3d2f97a56 100755
--- a/numpy/f2py/crackfortran.py
+++ b/numpy/f2py/crackfortran.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
"""
crackfortran --- read fortran (77,90) code and extract declaration information.
@@ -138,8 +138,6 @@ TODO:
The above may be solved by creating appropriate preprocessor program, for example.
"""
-from __future__ import division, absolute_import, print_function
-
import sys
import string
import fileinput
@@ -150,7 +148,7 @@ import platform
from . import __version__
-# The eviroment provided by auxfuncs.py is needed for some calls to eval.
+# The environment provided by auxfuncs.py is needed for some calls to eval.
# As the needed functions cannot be determined by static inspection of the
# code, it is safest to use import * pending a major refactoring of f2py.
from .auxfuncs import *
@@ -558,7 +556,8 @@ groupbegins90 = groupbegins77 + \
r'|module(?!\s*procedure)|python\s*module|interface|type(?!\s*\()'
beginpattern90 = re.compile(
beforethisafter % ('', groupbegins90, groupbegins90, '.*'), re.I), 'begin'
-groupends = r'end|endprogram|endblockdata|endmodule|endpythonmodule|endinterface'
+groupends = (r'end|endprogram|endblockdata|endmodule|endpythonmodule|'
+ r'endinterface|endsubroutine|endfunction')
endpattern = re.compile(
beforethisafter % ('', groupends, groupends, r'[\w\s]*'), re.I), 'end'
# endifs='end\s*(if|do|where|select|while|forall)'
@@ -580,8 +579,8 @@ publicpattern = re.compile(
beforethisafter % ('', 'public', 'public', '.*'), re.I), 'public'
privatepattern = re.compile(
beforethisafter % ('', 'private', 'private', '.*'), re.I), 'private'
-intrisicpattern = re.compile(
- beforethisafter % ('', 'intrisic', 'intrisic', '.*'), re.I), 'intrisic'
+intrinsicpattern = re.compile(
+ beforethisafter % ('', 'intrinsic', 'intrinsic', '.*'), re.I), 'intrinsic'
intentpattern = re.compile(beforethisafter % (
'', 'intent|depend|note|check', 'intent|depend|note|check', r'\s*\(.*?\).*'), re.I), 'intent'
parameterpattern = re.compile(
@@ -706,7 +705,7 @@ def crackline(line, reset=0):
for pat in [dimensionpattern, externalpattern, intentpattern, optionalpattern,
requiredpattern,
parameterpattern, datapattern, publicpattern, privatepattern,
- intrisicpattern,
+ intrinsicpattern,
endifpattern, endpattern,
formatpattern,
beginpattern, functionpattern, subroutinepattern,
@@ -1098,7 +1097,7 @@ def analyzeline(m, case, line):
last_name = updatevars(typespec, selector, attr, edecl)
if last_name is not None:
previous_context = ('variable', last_name, groupcounter)
- elif case in ['dimension', 'intent', 'optional', 'required', 'external', 'public', 'private', 'intrisic']:
+ elif case in ['dimension', 'intent', 'optional', 'required', 'external', 'public', 'private', 'intrinsic']:
edecl = groupcache[groupcounter]['vars']
ll = m.group('after').strip()
i = ll.find('::')
@@ -1158,7 +1157,7 @@ def analyzeline(m, case, line):
else:
errmess('analyzeline: intent(callback) %s is already'
' in argument list' % (k))
- if case in ['optional', 'required', 'public', 'external', 'private', 'intrisic']:
+ if case in ['optional', 'required', 'public', 'external', 'private', 'intrinsic']:
ap = case
if 'attrspec' in edecl[k]:
edecl[k]['attrspec'].append(ap)
@@ -1750,10 +1749,12 @@ def setattrspec(decl, attr, force=0):
decl['attrspec'].append(attr)
elif attr == 'automatic' and 'static' not in decl['attrspec']:
decl['attrspec'].append(attr)
- elif attr == 'public' and 'private' not in decl['attrspec']:
- decl['attrspec'].append(attr)
- elif attr == 'private' and 'public' not in decl['attrspec']:
- decl['attrspec'].append(attr)
+ elif attr == 'public':
+ if 'private' not in decl['attrspec']:
+ decl['attrspec'].append(attr)
+ elif attr == 'private':
+ if 'public' not in decl['attrspec']:
+ decl['attrspec'].append(attr)
else:
decl['attrspec'].append(attr)
return decl
@@ -3114,11 +3115,12 @@ def true_intent_list(var):
ret = []
for intent in lst:
try:
- c = eval('isintent_%s(var)' % intent)
- except NameError:
- c = 0
- if c:
- ret.append(intent)
+ f = globals()['isintent_%s' % intent]
+ except KeyError:
+ pass
+ else:
+ if f(var):
+ ret.append(intent)
return ret
diff --git a/numpy/f2py/diagnose.py b/numpy/f2py/diagnose.py
index 0241fed12..21ee399f0 100644
--- a/numpy/f2py/diagnose.py
+++ b/numpy/f2py/diagnose.py
@@ -1,6 +1,4 @@
-#!/usr/bin/env python
-from __future__ import division, absolute_import, print_function
-
+#!/usr/bin/env python3
import os
import sys
import tempfile
diff --git a/numpy/f2py/f2py2e.py b/numpy/f2py/f2py2e.py
index 110337f92..71a049e41 100755
--- a/numpy/f2py/f2py2e.py
+++ b/numpy/f2py/f2py2e.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
"""
f2py2e - Fortran to Python C/API generator. 2nd Edition.
@@ -14,8 +14,6 @@ $Date: 2005/05/06 08:31:19 $
Pearu Peterson
"""
-from __future__ import division, absolute_import, print_function
-
import sys
import os
import pprint
@@ -28,6 +26,7 @@ from . import auxfuncs
from . import cfuncs
from . import f90mod_rules
from . import __version__
+from . import capi_maps
f2py_version = __version__.version
errmess = sys.stderr.write
@@ -118,6 +117,9 @@ Options:
--link-<resource> switch below. [..] is optional list
of resources names. E.g. try 'f2py --help-link lapack_opt'.
+ --f2cmap <filename> Load Fortran-to-Python KIND specification from the given
+ file. Default: .f2py_f2cmap in current directory.
+
--quiet Run quietly.
--verbose Run with extra verbosity.
-v Print f2py version ID and exit.
@@ -167,7 +169,7 @@ Extra options (only effective with -c):
Version: %s
numpy Version: %s
-Requires: Python 2.3 or higher.
+Requires: Python 3.5 or higher.
License: NumPy license (see LICENSE.txt in the NumPy source code)
Copyright 1999 - 2011 Pearu Peterson all rights reserved.
http://cens.ioc.ee/projects/f2py2e/""" % (f2py_version, numpy_version)
@@ -175,7 +177,7 @@ http://cens.ioc.ee/projects/f2py2e/""" % (f2py_version, numpy_version)
def scaninputline(inputline):
files, skipfuncs, onlyfuncs, debug = [], [], [], []
- f, f2, f3, f5, f6, f7, f8, f9 = 1, 0, 0, 0, 0, 0, 0, 0
+ f, f2, f3, f5, f6, f7, f8, f9, f10 = 1, 0, 0, 0, 0, 0, 0, 0, 0
verbose = 1
dolc = -1
dolatexdoc = 0
@@ -226,6 +228,8 @@ def scaninputline(inputline):
f8 = 1
elif l == '--f2py-wrapper-output':
f9 = 1
+ elif l == '--f2cmap':
+ f10 = 1
elif l == '--overwrite-signature':
options['h-overwrite'] = 1
elif l == '-h':
@@ -267,6 +271,9 @@ def scaninputline(inputline):
elif f9:
f9 = 0
options["f2py_wrapper_output"] = l
+ elif f10:
+ f10 = 0
+ options["f2cmap_file"] = l
elif f == 1:
try:
with open(l):
@@ -312,6 +319,7 @@ def scaninputline(inputline):
options['wrapfuncs'] = wrapfuncs
options['buildpath'] = buildpath
options['include_paths'] = include_paths
+ options.setdefault('f2cmap_file', None)
return files, options
@@ -422,6 +430,7 @@ def run_main(comline_list):
fobjcsrc = os.path.join(f2pydir, 'src', 'fortranobject.c')
files, options = scaninputline(comline_list)
auxfuncs.options = options
+ capi_maps.load_f2cmap_file(options['f2cmap_file'])
postlist = callcrackfortran(files, options)
isusedby = {}
for i in range(len(postlist)):
@@ -574,7 +583,7 @@ def run_compile():
modulename = 'untitled'
sources = sys.argv[1:]
- for optname in ['--include_paths', '--include-paths']:
+ for optname in ['--include_paths', '--include-paths', '--f2cmap']:
if optname in sys.argv:
i = sys.argv.index(optname)
f2py_flags.extend(sys.argv[i:i + 2])
diff --git a/numpy/f2py/f2py_testing.py b/numpy/f2py/f2py_testing.py
index f5d5fa63d..1f109e67a 100644
--- a/numpy/f2py/f2py_testing.py
+++ b/numpy/f2py/f2py_testing.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import sys
import re
diff --git a/numpy/f2py/f90mod_rules.py b/numpy/f2py/f90mod_rules.py
index 85eae8047..122fa8939 100644
--- a/numpy/f2py/f90mod_rules.py
+++ b/numpy/f2py/f90mod_rules.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
"""
Build F90 module support for f2py2e.
@@ -13,8 +13,6 @@ $Date: 2005/02/03 19:30:23 $
Pearu Peterson
"""
-from __future__ import division, absolute_import, print_function
-
__version__ = "$Revision: 1.27 $"[10:-1]
f2py_version = 'See `f2py -v`'
@@ -25,7 +23,7 @@ from . import capi_maps
from . import func2subr
from .crackfortran import undo_rmbadname, undo_rmbadname1
-# The eviroment provided by auxfuncs.py is needed for some calls to eval.
+# The environment provided by auxfuncs.py is needed for some calls to eval.
# As the needed functions cannot be determined by static inspection of the
# code, it is safest to use import * pending a major refactoring of f2py.
from .auxfuncs import *
@@ -87,7 +85,6 @@ fgetdims2_sa = """\
def buildhooks(pymod):
- global fgetdims1, fgetdims2
from . import rules
ret = {'f90modhooks': [], 'initf90modhooks': [], 'body': [],
'need': ['F_FUNC', 'arrayobject.h'],
@@ -180,7 +177,7 @@ def buildhooks(pymod):
(m['name'], undo_rmbadname1(n)))
fadd('integer flag\n')
fhooks[0] = fhooks[0] + fgetdims1
- dms = eval('range(1,%s+1)' % (dm['rank']))
+ dms = range(1, int(dm['rank']) + 1)
fadd(' allocate(d(%s))\n' %
(','.join(['s(%s)' % i for i in dms])))
fhooks[0] = fhooks[0] + use_fgetdims2
diff --git a/numpy/f2py/func2subr.py b/numpy/f2py/func2subr.py
index 6010d5a23..e9976f43c 100644
--- a/numpy/f2py/func2subr.py
+++ b/numpy/f2py/func2subr.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
"""
Rules for building C/API module with f2py2e.
@@ -13,8 +13,6 @@ $Date: 2004/11/26 11:13:06 $
Pearu Peterson
"""
-from __future__ import division, absolute_import, print_function
-
__version__ = "$Revision: 1.16 $"[10:-1]
f2py_version = 'See `f2py -v`'
diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py
index f2f713bde..6750bf705 100755
--- a/numpy/f2py/rules.py
+++ b/numpy/f2py/rules.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
"""
Rules for building C/API module with f2py2e.
@@ -50,8 +50,6 @@ $Date: 2005/08/30 08:58:42 $
Pearu Peterson
"""
-from __future__ import division, absolute_import, print_function
-
__version__ = "$Revision: 1.129 $"[10:-1]
from . import __version__
@@ -180,7 +178,6 @@ static PyMethodDef f2py_module_methods[] = {
\t{NULL,NULL}
};
-#if PY_VERSION_HEX >= 0x03000000
static struct PyModuleDef moduledef = {
\tPyModuleDef_HEAD_INIT,
\t"#modulename#",
@@ -192,35 +189,20 @@ static struct PyModuleDef moduledef = {
\tNULL,
\tNULL
};
-#endif
-#if PY_VERSION_HEX >= 0x03000000
-#define RETVAL m
PyMODINIT_FUNC PyInit_#modulename#(void) {
-#else
-#define RETVAL
-PyMODINIT_FUNC init#modulename#(void) {
-#endif
\tint i;
\tPyObject *m,*d, *s, *tmp;
-#if PY_VERSION_HEX >= 0x03000000
\tm = #modulename#_module = PyModule_Create(&moduledef);
-#else
-\tm = #modulename#_module = Py_InitModule(\"#modulename#\", f2py_module_methods);
-#endif
\tPy_TYPE(&PyFortran_Type) = &PyType_Type;
\timport_array();
\tif (PyErr_Occurred())
-\t\t{PyErr_SetString(PyExc_ImportError, \"can't initialize module #modulename# (failed to import numpy)\"); return RETVAL;}
+\t\t{PyErr_SetString(PyExc_ImportError, \"can't initialize module #modulename# (failed to import numpy)\"); return m;}
\td = PyModule_GetDict(m);
\ts = PyString_FromString(\"$R""" + """evision: $\");
\tPyDict_SetItemString(d, \"__version__\", s);
\tPy_DECREF(s);
-#if PY_VERSION_HEX >= 0x03000000
\ts = PyUnicode_FromString(
-#else
-\ts = PyString_FromString(
-#endif
\t\t\"This module '#modulename#' is auto-generated with f2py (version:#f2py_version#).\\nFunctions:\\n\"\n#docs#\".\");
\tPyDict_SetItemString(d, \"__doc__\", s);
\tPy_DECREF(s);
@@ -245,7 +227,7 @@ PyMODINIT_FUNC init#modulename#(void) {
\tif (! PyErr_Occurred())
\t\ton_exit(f2py_report_on_exit,(void*)\"#modulename#\");
#endif
-\treturn RETVAL;
+\treturn m;
}
#ifdef __cplusplus
}
@@ -294,7 +276,7 @@ static PyObject *#apiname#(const PyObject *capi_self,
f2py_start_clock();
#endif
\tif (!PyArg_ParseTupleAndKeywords(capi_args,capi_keywds,\\
-\t\t\"#argformat##keyformat##xaformat#:#pyname#\",\\
+\t\t\"#argformat#|#keyformat##xaformat#:#pyname#\",\\
\t\tcapi_kwlist#args_capi##keys_capi##keys_xa#))\n\t\treturn NULL;
#frompyobj#
/*end of frompyobj*/
@@ -448,11 +430,7 @@ rout_rules = [
tmp = F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL);
PyObject_SetAttrString(o,"_cpointer", tmp);
Py_DECREF(tmp);
-#if PY_VERSION_HEX >= 0x03000000
s = PyUnicode_FromString("#name#");
-#else
- s = PyString_FromString("#name#");
-#endif
PyObject_SetAttrString(o,"__name__", s);
Py_DECREF(s);
}
@@ -490,11 +468,7 @@ rout_rules = [
tmp = F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL);
PyObject_SetAttrString(o,"_cpointer", tmp);
Py_DECREF(tmp);
-#if PY_VERSION_HEX >= 0x03000000
s = PyUnicode_FromString("#name#");
-#else
- s = PyString_FromString("#name#");
-#endif
PyObject_SetAttrString(o,"__name__", s);
Py_DECREF(s);
}
@@ -1064,8 +1038,10 @@ if (#varname#_capi==Py_None) {
'\tcapi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,#varname#_capi);'},
"""\
\tif (capi_#varname#_tmp == NULL) {
-\t\tif (!PyErr_Occurred())
-\t\t\tPyErr_SetString(#modulename#_error,\"failed in converting #nth# `#varname#\' of #pyname# to C/Fortran array\" );
+\t\tPyObject *exc, *val, *tb;
+\t\tPyErr_Fetch(&exc, &val, &tb);
+\t\tPyErr_SetString(exc ? exc : #modulename#_error,\"failed in converting #nth# `#varname#\' of #pyname# to C/Fortran array\" );
+\t\tnpy_PyErr_ChainExceptionsCause(exc, val, tb);
\t} else {
\t\t#varname# = (#ctype# *)(PyArray_DATA(capi_#varname#_tmp));
""",
@@ -1081,8 +1057,10 @@ if (#varname#_capi==Py_None) {
\t\t\twhile ((_i = nextforcomb()))
\t\t\t\t#varname#[capi_i++] = #init#; /* fortran way */
\t\t} else {
-\t\t\tif (!PyErr_Occurred())
-\t\t\t\tPyErr_SetString(#modulename#_error,\"Initialization of #nth# #varname# failed (initforcomb).\");
+\t\t\tPyObject *exc, *val, *tb;
+\t\t\tPyErr_Fetch(&exc, &val, &tb);
+\t\t\tPyErr_SetString(exc ? exc : #modulename#_error,\"Initialization of #nth# #varname# failed (initforcomb).\");
+\t\t\tnpy_PyErr_ChainExceptionsCause(exc, val, tb);
\t\t\tf2py_success = 0;
\t\t}
\t}
@@ -1179,7 +1157,6 @@ def buildmodule(m, um):
"""
Return
"""
- global f2py_version, options
outmess('\tBuilding module "%s"...\n' % (m['name']))
ret = {}
mod_rules = defmod_rules[:]
@@ -1467,16 +1444,6 @@ def buildapi(rout):
['\\begin{description}'] + rd[k][1:] +\
['\\end{description}']
- # Workaround for Python 2.6, 2.6.1 bug: https://bugs.python.org/issue4720
- if rd['keyformat'] or rd['xaformat']:
- argformat = rd['argformat']
- if isinstance(argformat, list):
- argformat.append('|')
- else:
- assert isinstance(argformat, str), repr(
- (argformat, type(argformat)))
- rd['argformat'] += '|'
-
ar = applyrules(routine_rules, rd)
if ismoduleroutine(rout):
outmess('\t\t\t %s\n' % (ar['docshort']))
diff --git a/numpy/f2py/setup.py b/numpy/f2py/setup.py
index a8c1401aa..6314c5af3 100644
--- a/numpy/f2py/setup.py
+++ b/numpy/f2py/setup.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
"""
setup.py for installing F2PY
@@ -16,8 +16,6 @@ $Date: 2005/01/30 17:22:14 $
Pearu Peterson
"""
-from __future__ import division, print_function
-
from numpy.distutils.core import setup
from numpy.distutils.misc_util import Configuration
diff --git a/numpy/f2py/src/fortranobject.c b/numpy/f2py/src/fortranobject.c
index 8aa55555d..b3a04bcf0 100644
--- a/numpy/f2py/src/fortranobject.c
+++ b/numpy/f2py/src/fortranobject.c
@@ -115,14 +115,6 @@ fortran_dealloc(PyFortranObject *fp) {
}
-#if PY_VERSION_HEX >= 0x03000000
-#else
-static PyMethodDef fortran_methods[] = {
- {NULL, NULL} /* sentinel */
-};
-#endif
-
-
/* Returns number of bytes consumed from buf, or -1 on error. */
static Py_ssize_t
format_def(char *buf, Py_ssize_t size, FortranDataDef def)
@@ -242,11 +234,7 @@ fortran_doc(FortranDataDef def)
size--;
/* p now points one beyond the last character of the string in buf */
-#if PY_VERSION_HEX >= 0x03000000
s = PyUnicode_FromStringAndSize(buf, p - buf);
-#else
- s = PyString_FromStringAndSize(buf, p - buf);
-#endif
PyMem_Free(buf);
return s;
@@ -272,8 +260,11 @@ static PyObject *
fortran_getattr(PyFortranObject *fp, char *name) {
int i,j,k,flag;
if (fp->dict != NULL) {
- PyObject *v = PyDict_GetItemString(fp->dict, name);
- if (v != NULL) {
+ PyObject *v = _PyDict_GetItemStringWithError(fp->dict, name);
+ if (v == NULL && PyErr_Occurred()) {
+ return NULL;
+ }
+ else if (v != NULL) {
Py_INCREF(v);
return v;
}
@@ -306,7 +297,6 @@ fortran_getattr(PyFortranObject *fp, char *name) {
return fp->dict;
}
if (strcmp(name,"__doc__")==0) {
-#if PY_VERSION_HEX >= 0x03000000
PyObject *s = PyUnicode_FromString(""), *s2, *s3;
for (i=0;i<fp->len;i++) {
s2 = fortran_doc(fp->defs[i]);
@@ -315,11 +305,6 @@ fortran_getattr(PyFortranObject *fp, char *name) {
Py_DECREF(s);
s = s3;
}
-#else
- PyObject *s = PyString_FromString("");
- for (i=0;i<fp->len;i++)
- PyString_ConcatAndDel(&s,fortran_doc(fp->defs[i]));
-#endif
if (PyDict_SetItemString(fp->dict, name, s))
return NULL;
return s;
@@ -330,17 +315,11 @@ fortran_getattr(PyFortranObject *fp, char *name) {
return NULL;
return cobj;
}
-#if PY_VERSION_HEX >= 0x03000000
- if (1) {
- PyObject *str, *ret;
- str = PyUnicode_FromString(name);
- ret = PyObject_GenericGetAttr((PyObject *)fp, str);
- Py_DECREF(str);
- return ret;
- }
-#else
- return Py_FindMethod(fortran_methods, (PyObject *)fp, name);
-#endif
+ PyObject *str, *ret;
+ str = PyUnicode_FromString(name);
+ ret = PyObject_GenericGetAttr((PyObject *)fp, str);
+ Py_DECREF(str);
+ return ret;
}
static int
@@ -434,48 +413,26 @@ fortran_repr(PyFortranObject *fp)
PyObject *name = NULL, *repr = NULL;
name = PyObject_GetAttrString((PyObject *)fp, "__name__");
PyErr_Clear();
-#if PY_VERSION_HEX >= 0x03000000
if (name != NULL && PyUnicode_Check(name)) {
repr = PyUnicode_FromFormat("<fortran %U>", name);
}
else {
repr = PyUnicode_FromString("<fortran object>");
}
-#else
- if (name != NULL && PyString_Check(name)) {
- repr = PyString_FromFormat("<fortran %s>", PyString_AsString(name));
- }
- else {
- repr = PyString_FromString("<fortran object>");
- }
-#endif
Py_XDECREF(name);
return repr;
}
PyTypeObject PyFortran_Type = {
-#if PY_VERSION_HEX >= 0x03000000
PyVarObject_HEAD_INIT(NULL, 0)
-#else
- PyObject_HEAD_INIT(0)
- 0, /*ob_size*/
-#endif
- "fortran", /*tp_name*/
- sizeof(PyFortranObject), /*tp_basicsize*/
- 0, /*tp_itemsize*/
- /* methods */
- (destructor)fortran_dealloc, /*tp_dealloc*/
- 0, /*tp_print*/
- (getattrfunc)fortran_getattr, /*tp_getattr*/
- (setattrfunc)fortran_setattr, /*tp_setattr*/
- 0, /*tp_compare/tp_reserved*/
- (reprfunc)fortran_repr, /*tp_repr*/
- 0, /*tp_as_number*/
- 0, /*tp_as_sequence*/
- 0, /*tp_as_mapping*/
- 0, /*tp_hash*/
- (ternaryfunc)fortran_call, /*tp_call*/
+ .tp_name ="fortran",
+ .tp_basicsize = sizeof(PyFortranObject),
+ .tp_dealloc = (destructor)fortran_dealloc,
+ .tp_getattr = (getattrfunc)fortran_getattr,
+ .tp_setattr = (setattrfunc)fortran_setattr,
+ .tp_repr = (reprfunc)fortran_repr,
+ .tp_call = (ternaryfunc)fortran_call,
};
/************************* f2py_report_atexit *******************************/
@@ -626,7 +583,7 @@ count_negative_dimensions(const int rank,
}
#ifdef DEBUG_COPY_ND_ARRAY
-void dump_dims(int rank, npy_intp* dims) {
+void dump_dims(int rank, npy_intp const* dims) {
int i;
printf("[");
for(i=0;i<rank;++i) {
@@ -839,9 +796,10 @@ PyArrayObject* array_from_pyobj(const int type_num,
if ((intent & F2PY_INTENT_INOUT) ||
(intent & F2PY_INTENT_INPLACE) ||
(intent & F2PY_INTENT_CACHE)) {
- PyErr_SetString(PyExc_TypeError,
- "failed to initialize intent(inout|inplace|cache) "
- "array, input not an array");
+ PyErr_Format(PyExc_TypeError,
+ "failed to initialize intent(inout|inplace|cache) "
+ "array, input '%s' object is not an array",
+ Py_TYPE(obj)->tp_name);
return NULL;
}
@@ -1052,8 +1010,6 @@ int copy_ND_array(const PyArrayObject *arr, PyArrayObject *out)
/* Compatibility functions for Python >= 3.0 */
/*********************************************/
-#if PY_VERSION_HEX >= 0x03000000
-
PyObject *
F2PyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *))
{
@@ -1080,29 +1036,6 @@ F2PyCapsule_Check(PyObject *ptr)
return PyCapsule_CheckExact(ptr);
}
-#else
-
-PyObject *
-F2PyCapsule_FromVoidPtr(void *ptr, void (*dtor)(void *))
-{
- return PyCObject_FromVoidPtr(ptr, dtor);
-}
-
-void *
-F2PyCapsule_AsVoidPtr(PyObject *ptr)
-{
- return PyCObject_AsVoidPtr(ptr);
-}
-
-int
-F2PyCapsule_Check(PyObject *ptr)
-{
- return PyCObject_Check(ptr);
-}
-
-#endif
-
-
#ifdef __cplusplus
}
#endif
diff --git a/numpy/f2py/src/fortranobject.h b/numpy/f2py/src/fortranobject.h
index 5d0dcf676..5c382ab7b 100644
--- a/numpy/f2py/src/fortranobject.h
+++ b/numpy/f2py/src/fortranobject.h
@@ -11,30 +11,7 @@ extern "C" {
#endif
#define PY_ARRAY_UNIQUE_SYMBOL _npy_f2py_ARRAY_API
#include "numpy/arrayobject.h"
-
-/*
- * Python 3 support macros
- */
-#if PY_VERSION_HEX >= 0x03000000
-#define PyString_Check PyBytes_Check
-#define PyString_GET_SIZE PyBytes_GET_SIZE
-#define PyString_AS_STRING PyBytes_AS_STRING
-#define PyString_FromString PyBytes_FromString
-#define PyUString_FromStringAndSize PyUnicode_FromStringAndSize
-#define PyString_ConcatAndDel PyBytes_ConcatAndDel
-#define PyString_AsString PyBytes_AsString
-
-#define PyInt_Check PyLong_Check
-#define PyInt_FromLong PyLong_FromLong
-#define PyInt_AS_LONG PyLong_AsLong
-#define PyInt_AsLong PyLong_AsLong
-
-#define PyNumber_Int PyNumber_Long
-
-#else
-
-#define PyUString_FromStringAndSize PyString_FromStringAndSize
-#endif
+#include "numpy/npy_3kcompat.h"
#ifdef F2PY_REPORT_ATEXIT
@@ -105,20 +82,10 @@ typedef struct {
extern PyObject * PyFortranObject_New(FortranDataDef* defs, f2py_void_func init);
extern PyObject * PyFortranObject_NewAsAttr(FortranDataDef* defs);
-#if PY_VERSION_HEX >= 0x03000000
-
PyObject * F2PyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *));
void * F2PyCapsule_AsVoidPtr(PyObject *obj);
int F2PyCapsule_Check(PyObject *ptr);
-#else
-
-PyObject * F2PyCapsule_FromVoidPtr(void *ptr, void (*dtor)(void *));
-void * F2PyCapsule_AsVoidPtr(PyObject *ptr);
-int F2PyCapsule_Check(PyObject *ptr);
-
-#endif
-
#define ISCONTIGUOUS(m) (PyArray_FLAGS(m) & NPY_ARRAY_C_CONTIGUOUS)
#define F2PY_INTENT_IN 1
#define F2PY_INTENT_INOUT 2
diff --git a/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c b/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c
index 978db4e69..83c0da2cf 100644
--- a/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c
+++ b/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c
@@ -129,7 +129,6 @@ static PyMethodDef f2py_module_methods[] = {
{NULL,NULL}
};
-#if PY_VERSION_HEX >= 0x03000000
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
"test_array_from_pyobj_ext",
@@ -141,21 +140,10 @@ static struct PyModuleDef moduledef = {
NULL,
NULL
};
-#endif
-#if PY_VERSION_HEX >= 0x03000000
-#define RETVAL m
PyMODINIT_FUNC PyInit_test_array_from_pyobj_ext(void) {
-#else
-#define RETVAL
-PyMODINIT_FUNC inittest_array_from_pyobj_ext(void) {
-#endif
PyObject *m,*d, *s;
-#if PY_VERSION_HEX >= 0x03000000
m = wrap_module = PyModule_Create(&moduledef);
-#else
- m = wrap_module = Py_InitModule("test_array_from_pyobj_ext", f2py_module_methods);
-#endif
Py_TYPE(&PyFortran_Type) = &PyType_Type;
import_array();
if (PyErr_Occurred())
@@ -238,7 +226,7 @@ PyMODINIT_FUNC inittest_array_from_pyobj_ext(void) {
on_exit(f2py_report_on_exit,(void*)"array_from_pyobj.wrap.call");
#endif
- return RETVAL;
+ return m;
}
#ifdef __cplusplus
}
diff --git a/numpy/f2py/tests/test_array_from_pyobj.py b/numpy/f2py/tests/test_array_from_pyobj.py
index a80090185..b719f2495 100644
--- a/numpy/f2py/tests/test_array_from_pyobj.py
+++ b/numpy/f2py/tests/test_array_from_pyobj.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import os
import sys
import copy
@@ -57,7 +55,7 @@ def flags2names(flags):
return info
-class Intent(object):
+class Intent:
def __init__(self, intent_list=[]):
self.intent_list = intent_list[:]
@@ -131,7 +129,7 @@ if ((intp().dtype.itemsize != 4 or clongdouble().dtype.alignment <= 8) and
_cast_dict['CDOUBLE'] = _cast_dict['DOUBLE'] + ['CFLOAT', 'CDOUBLE']
-class Type(object):
+class Type:
_type_cache = {}
def __new__(cls, name):
@@ -192,7 +190,7 @@ class Type(object):
return types
-class Array(object):
+class Array:
def __init__(self, typ, dims, intent, obj):
self.type = typ
@@ -293,7 +291,7 @@ class Array(object):
return obj_attr[0] == self.arr_attr[0]
-class TestIntent(object):
+class TestIntent:
def test_in_out(self):
assert_equal(str(intent.in_.out), 'intent(in,out)')
@@ -304,7 +302,7 @@ class TestIntent(object):
assert_(not intent.in_.is_intent('c'))
-class TestSharedMemory(object):
+class TestSharedMemory:
num2seq = [1, 2]
num23seq = [[1, 2, 3], [4, 5, 6]]
diff --git a/numpy/f2py/tests/test_assumed_shape.py b/numpy/f2py/tests/test_assumed_shape.py
index 460afd68d..dfc252660 100644
--- a/numpy/f2py/tests/test_assumed_shape.py
+++ b/numpy/f2py/tests/test_assumed_shape.py
@@ -1,7 +1,6 @@
-from __future__ import division, absolute_import, print_function
-
import os
import pytest
+import tempfile
from numpy.testing import assert_
from . import util
@@ -16,6 +15,7 @@ class TestAssumedShapeSumExample(util.F2PyTest):
_path('src', 'assumed_shape', 'foo_use.f90'),
_path('src', 'assumed_shape', 'precision.f90'),
_path('src', 'assumed_shape', 'foo_mod.f90'),
+ _path('src', 'assumed_shape', '.f2py_f2cmap'),
]
@pytest.mark.slow
@@ -31,3 +31,23 @@ class TestAssumedShapeSumExample(util.F2PyTest):
assert_(r == 3, repr(r))
r = self.module.mod.fsum([1, 2])
assert_(r == 3, repr(r))
+
+
+class TestF2cmapOption(TestAssumedShapeSumExample):
+ def setup(self):
+ # Use a custom file name for .f2py_f2cmap
+ self.sources = list(self.sources)
+ f2cmap_src = self.sources.pop(-1)
+
+ self.f2cmap_file = tempfile.NamedTemporaryFile(delete=False)
+ with open(f2cmap_src, 'rb') as f:
+ self.f2cmap_file.write(f.read())
+ self.f2cmap_file.close()
+
+ self.sources.append(self.f2cmap_file.name)
+ self.options = ["--f2cmap", self.f2cmap_file.name]
+
+ super(TestF2cmapOption, self).setup()
+
+ def teardown(self):
+ os.unlink(self.f2cmap_file.name)
diff --git a/numpy/f2py/tests/test_block_docstring.py b/numpy/f2py/tests/test_block_docstring.py
index 4f1678980..e431f5ba6 100644
--- a/numpy/f2py/tests/test_block_docstring.py
+++ b/numpy/f2py/tests/test_block_docstring.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import sys
import pytest
from . import util
@@ -18,7 +16,8 @@ class TestBlockDocString(util.F2PyTest):
@pytest.mark.skipif(sys.platform=='win32',
reason='Fails with MinGW64 Gfortran (Issue #9673)')
- @pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc")
+ @pytest.mark.xfail(IS_PYPY,
+ reason="PyPy cannot modify tp_doc after PyType_Ready")
def test_block_docstring(self):
expected = "'i'-array(2,3)\n"
assert_equal(self.module.block.__doc__, expected)
diff --git a/numpy/f2py/tests/test_callback.py b/numpy/f2py/tests/test_callback.py
index 21c29ba5f..4e29ab9fc 100644
--- a/numpy/f2py/tests/test_callback.py
+++ b/numpy/f2py/tests/test_callback.py
@@ -1,12 +1,10 @@
-from __future__ import division, absolute_import, print_function
-
import math
import textwrap
import sys
import pytest
import numpy as np
-from numpy.testing import assert_, assert_equal
+from numpy.testing import assert_, assert_equal, IS_PYPY
from . import util
@@ -61,12 +59,12 @@ cf2py intent(out) a
end
"""
- @pytest.mark.slow
@pytest.mark.parametrize('name', 't,t2'.split(','))
def test_all(self, name):
self.check_function(name)
- @pytest.mark.slow
+ @pytest.mark.xfail(IS_PYPY,
+ reason="PyPy cannot modify tp_doc after PyType_Ready")
def test_docstring(self):
expected = textwrap.dedent("""\
a = t(fun,[fun_extra_args])
@@ -118,7 +116,7 @@ cf2py intent(out) a
r = t(self.module.func0._cpointer)
assert_(r == 11, repr(r))
- class A(object):
+ class A:
def __call__(self):
return 7
diff --git a/numpy/f2py/tests/test_common.py b/numpy/f2py/tests/test_common.py
index dcb01b0ec..e4bf35504 100644
--- a/numpy/f2py/tests/test_common.py
+++ b/numpy/f2py/tests/test_common.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import os
import sys
import pytest
diff --git a/numpy/f2py/tests/test_compile_function.py b/numpy/f2py/tests/test_compile_function.py
index 40ea7997f..f76fd6448 100644
--- a/numpy/f2py/tests/test_compile_function.py
+++ b/numpy/f2py/tests/test_compile_function.py
@@ -1,8 +1,6 @@
"""See https://github.com/numpy/numpy/pull/11937.
"""
-from __future__ import division, absolute_import, print_function
-
import sys
import os
import uuid
@@ -16,8 +14,6 @@ from . import util
def setup_module():
- if sys.platform == 'win32' and sys.version_info[0] < 3:
- pytest.skip('Fails with MinGW64 Gfortran (Issue #9673)')
if not util.has_c_compiler():
pytest.skip("Needs C compiler")
if not util.has_f77_compiler():
diff --git a/numpy/f2py/tests/test_crackfortran.py b/numpy/f2py/tests/test_crackfortran.py
new file mode 100644
index 000000000..735804024
--- /dev/null
+++ b/numpy/f2py/tests/test_crackfortran.py
@@ -0,0 +1,88 @@
+import numpy as np
+from numpy.testing import assert_array_equal
+from . import util
+from numpy.f2py import crackfortran
+import tempfile
+import textwrap
+
+
+class TestNoSpace(util.F2PyTest):
+ # issue gh-15035: add handling for endsubroutine, endfunction with no space
+ # between "end" and the block name
+ code = """
+ subroutine subb(k)
+ real(8), intent(inout) :: k(:)
+ k=k+1
+ endsubroutine
+
+ subroutine subc(w,k)
+ real(8), intent(in) :: w(:)
+ real(8), intent(out) :: k(size(w))
+ k=w+1
+ endsubroutine
+
+ function t0(value)
+ character value
+ character t0
+ t0 = value
+ endfunction
+ """
+
+ def test_module(self):
+ k = np.array([1, 2, 3], dtype=np.float64)
+ w = np.array([1, 2, 3], dtype=np.float64)
+ self.module.subb(k)
+ assert_array_equal(k, w + 1)
+ self.module.subc([w, k])
+ assert_array_equal(k, w + 1)
+ assert self.module.t0(23) == b'2'
+
+class TestPublicPrivate():
+ def test_defaultPrivate(self, tmp_path):
+ f_path = tmp_path / "mod.f90"
+ with f_path.open('w') as ff:
+ ff.write(textwrap.dedent("""\
+ module foo
+ private
+ integer :: a
+ public :: setA
+ integer :: b
+ contains
+ subroutine setA(v)
+ integer, intent(in) :: v
+ a = v
+ end subroutine setA
+ end module foo
+ """))
+ mod = crackfortran.crackfortran([str(f_path)])
+ assert len(mod) == 1
+ mod = mod[0]
+ assert 'private' in mod['vars']['a']['attrspec']
+ assert 'public' not in mod['vars']['a']['attrspec']
+ assert 'private' in mod['vars']['b']['attrspec']
+ assert 'public' not in mod['vars']['b']['attrspec']
+ assert 'private' not in mod['vars']['seta']['attrspec']
+ assert 'public' in mod['vars']['seta']['attrspec']
+
+ def test_defaultPublic(self, tmp_path):
+ f_path = tmp_path / "mod.f90"
+ with f_path.open('w') as ff:
+ ff.write(textwrap.dedent("""\
+ module foo
+ public
+ integer, private :: a
+ public :: setA
+ contains
+ subroutine setA(v)
+ integer, intent(in) :: v
+ a = v
+ end subroutine setA
+ end module foo
+ """))
+ mod = crackfortran.crackfortran([str(f_path)])
+ assert len(mod) == 1
+ mod = mod[0]
+ assert 'private' in mod['vars']['a']['attrspec']
+ assert 'public' not in mod['vars']['a']['attrspec']
+ assert 'private' not in mod['vars']['seta']['attrspec']
+ assert 'public' in mod['vars']['seta']['attrspec']
diff --git a/numpy/f2py/tests/test_kind.py b/numpy/f2py/tests/test_kind.py
index 1f7762a80..a7e2b28ed 100644
--- a/numpy/f2py/tests/test_kind.py
+++ b/numpy/f2py/tests/test_kind.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import os
import pytest
diff --git a/numpy/f2py/tests/test_mixed.py b/numpy/f2py/tests/test_mixed.py
index 0337538ff..04266ca5b 100644
--- a/numpy/f2py/tests/test_mixed.py
+++ b/numpy/f2py/tests/test_mixed.py
@@ -1,10 +1,8 @@
-from __future__ import division, absolute_import, print_function
-
import os
import textwrap
import pytest
-from numpy.testing import assert_, assert_equal
+from numpy.testing import assert_, assert_equal, IS_PYPY
from . import util
@@ -17,13 +15,13 @@ class TestMixed(util.F2PyTest):
_path('src', 'mixed', 'foo_fixed.f90'),
_path('src', 'mixed', 'foo_free.f90')]
- @pytest.mark.slow
def test_all(self):
assert_(self.module.bar11() == 11)
assert_(self.module.foo_fixed.bar12() == 12)
assert_(self.module.foo_free.bar13() == 13)
- @pytest.mark.slow
+ @pytest.mark.xfail(IS_PYPY,
+ reason="PyPy cannot modify tp_doc after PyType_Ready")
def test_docstring(self):
expected = textwrap.dedent("""\
a = bar11()
diff --git a/numpy/f2py/tests/test_parameter.py b/numpy/f2py/tests/test_parameter.py
index 6a378687a..b61827169 100644
--- a/numpy/f2py/tests/test_parameter.py
+++ b/numpy/f2py/tests/test_parameter.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import os
import pytest
diff --git a/numpy/f2py/tests/test_quoted_character.py b/numpy/f2py/tests/test_quoted_character.py
index c9a1c36f5..20c77666c 100644
--- a/numpy/f2py/tests/test_quoted_character.py
+++ b/numpy/f2py/tests/test_quoted_character.py
@@ -1,10 +1,7 @@
"""See https://github.com/numpy/numpy/pull/10676.
"""
-from __future__ import division, absolute_import, print_function
-
import sys
-from importlib import import_module
import pytest
from numpy.testing import assert_equal
diff --git a/numpy/f2py/tests/test_regression.py b/numpy/f2py/tests/test_regression.py
index 3adae635d..67e00f1f7 100644
--- a/numpy/f2py/tests/test_regression.py
+++ b/numpy/f2py/tests/test_regression.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import os
import pytest
diff --git a/numpy/f2py/tests/test_return_character.py b/numpy/f2py/tests/test_return_character.py
index fc3a58d36..429e69bb4 100644
--- a/numpy/f2py/tests/test_return_character.py
+++ b/numpy/f2py/tests/test_return_character.py
@@ -1,16 +1,15 @@
-from __future__ import division, absolute_import, print_function
-
import pytest
from numpy import array
from numpy.testing import assert_
from . import util
+import platform
+IS_S390X = platform.machine() == 's390x'
class TestReturnCharacter(util.F2PyTest):
- def check_function(self, t):
- tname = t.__doc__.split()[0]
+ def check_function(self, t, tname):
if tname in ['t0', 't1', 's0', 's1']:
assert_(t(23) == b'2')
r = t('ab')
@@ -81,10 +80,10 @@ cf2py intent(out) ts
end
"""
- @pytest.mark.slow
+    @pytest.mark.xfail(IS_S390X, reason="callback returns ' '")
@pytest.mark.parametrize('name', 't0,t1,t5,s0,s1,s5,ss'.split(','))
def test_all(self, name):
- self.check_function(getattr(self.module, name))
+ self.check_function(getattr(self.module, name), name)
class TestF90ReturnCharacter(TestReturnCharacter):
@@ -140,7 +139,7 @@ module f90_return_char
end module f90_return_char
"""
- @pytest.mark.slow
+    @pytest.mark.xfail(IS_S390X, reason="callback returns ' '")
@pytest.mark.parametrize('name', 't0,t1,t5,ts,s0,s1,s5,ss'.split(','))
def test_all(self, name):
- self.check_function(getattr(self.module.f90_return_char, name))
+ self.check_function(getattr(self.module.f90_return_char, name), name)
diff --git a/numpy/f2py/tests/test_return_complex.py b/numpy/f2py/tests/test_return_complex.py
index 43c884dfb..3d2e2b94f 100644
--- a/numpy/f2py/tests/test_return_complex.py
+++ b/numpy/f2py/tests/test_return_complex.py
@@ -1,24 +1,20 @@
-from __future__ import division, absolute_import, print_function
-
import pytest
from numpy import array
-from numpy.compat import long
from numpy.testing import assert_, assert_raises
from . import util
class TestReturnComplex(util.F2PyTest):
- def check_function(self, t):
- tname = t.__doc__.split()[0]
+ def check_function(self, t, tname):
if tname in ['t0', 't8', 's0', 's8']:
err = 1e-5
else:
err = 0.0
assert_(abs(t(234j) - 234.0j) <= err)
assert_(abs(t(234.6) - 234.6) <= err)
- assert_(abs(t(long(234)) - 234.0) <= err)
+ assert_(abs(t(234) - 234.0) <= err)
assert_(abs(t(234.6 + 3j) - (234.6 + 3j)) <= err)
#assert_( abs(t('234')-234.)<=err)
#assert_( abs(t('234.6')-234.6)<=err)
@@ -104,10 +100,9 @@ cf2py intent(out) td
end
"""
- @pytest.mark.slow
@pytest.mark.parametrize('name', 't0,t8,t16,td,s0,s8,s16,sd'.split(','))
def test_all(self, name):
- self.check_function(getattr(self.module, name))
+ self.check_function(getattr(self.module, name), name)
class TestF90ReturnComplex(TestReturnComplex):
@@ -163,7 +158,6 @@ module f90_return_complex
end module f90_return_complex
"""
- @pytest.mark.slow
@pytest.mark.parametrize('name', 't0,t8,t16,td,s0,s8,s16,sd'.split(','))
def test_all(self, name):
- self.check_function(getattr(self.module.f90_return_complex, name))
+ self.check_function(getattr(self.module.f90_return_complex, name), name)
diff --git a/numpy/f2py/tests/test_return_integer.py b/numpy/f2py/tests/test_return_integer.py
index 22f4acfdf..0a8121dc1 100644
--- a/numpy/f2py/tests/test_return_integer.py
+++ b/numpy/f2py/tests/test_return_integer.py
@@ -1,19 +1,15 @@
-from __future__ import division, absolute_import, print_function
-
import pytest
from numpy import array
-from numpy.compat import long
from numpy.testing import assert_, assert_raises
from . import util
class TestReturnInteger(util.F2PyTest):
- def check_function(self, t):
+ def check_function(self, t, tname):
assert_(t(123) == 123, repr(t(123)))
assert_(t(123.6) == 123)
- assert_(t(long(123)) == 123)
assert_(t('123') == 123)
assert_(t(-123) == -123)
assert_(t([123]) == 123)
@@ -38,7 +34,7 @@ class TestReturnInteger(util.F2PyTest):
assert_raises(Exception, t, t)
assert_raises(Exception, t, {})
- if t.__doc__.split()[0] in ['t8', 's8']:
+ if tname in ['t8', 's8']:
assert_raises(OverflowError, t, 100000000000000000000000)
assert_raises(OverflowError, t, 10000000011111111111111.23)
@@ -103,11 +99,10 @@ cf2py intent(out) t8
end
"""
- @pytest.mark.slow
@pytest.mark.parametrize('name',
't0,t1,t2,t4,t8,s0,s1,s2,s4,s8'.split(','))
def test_all(self, name):
- self.check_function(getattr(self.module, name))
+ self.check_function(getattr(self.module, name), name)
class TestF90ReturnInteger(TestReturnInteger):
@@ -174,8 +169,7 @@ module f90_return_integer
end module f90_return_integer
"""
- @pytest.mark.slow
@pytest.mark.parametrize('name',
't0,t1,t2,t4,t8,s0,s1,s2,s4,s8'.split(','))
def test_all(self, name):
- self.check_function(getattr(self.module.f90_return_integer, name))
+ self.check_function(getattr(self.module.f90_return_integer, name), name)
diff --git a/numpy/f2py/tests/test_return_logical.py b/numpy/f2py/tests/test_return_logical.py
index 96f215a91..9db939c7e 100644
--- a/numpy/f2py/tests/test_return_logical.py
+++ b/numpy/f2py/tests/test_return_logical.py
@@ -1,9 +1,6 @@
-from __future__ import division, absolute_import, print_function
-
import pytest
from numpy import array
-from numpy.compat import long
from numpy.testing import assert_, assert_raises
from . import util
@@ -20,7 +17,6 @@ class TestReturnLogical(util.F2PyTest):
assert_(t(1j) == 1)
assert_(t(234) == 1)
assert_(t(234.6) == 1)
- assert_(t(long(234)) == 1)
assert_(t(234.6 + 3j) == 1)
assert_(t('234') == 1)
assert_(t('aaa') == 1)
diff --git a/numpy/f2py/tests/test_return_real.py b/numpy/f2py/tests/test_return_real.py
index 315cfe49b..8e5022a8e 100644
--- a/numpy/f2py/tests/test_return_real.py
+++ b/numpy/f2py/tests/test_return_real.py
@@ -1,24 +1,20 @@
-from __future__ import division, absolute_import, print_function
-
import platform
import pytest
from numpy import array
-from numpy.compat import long
from numpy.testing import assert_, assert_raises
from . import util
class TestReturnReal(util.F2PyTest):
- def check_function(self, t):
- if t.__doc__.split()[0] in ['t0', 't4', 's0', 's4']:
+ def check_function(self, t, tname):
+ if tname in ['t0', 't4', 's0', 's4']:
err = 1e-5
else:
err = 0.0
assert_(abs(t(234) - 234.0) <= err)
assert_(abs(t(234.6) - 234.6) <= err)
- assert_(abs(t(long(234)) - 234.0) <= err)
assert_(abs(t('234') - 234) <= err)
assert_(abs(t('234.6') - 234.6) <= err)
assert_(abs(t(-234) + 234) <= err)
@@ -34,7 +30,7 @@ class TestReturnReal(util.F2PyTest):
assert_(abs(t(array([234], 'B')) - 234.) <= err)
assert_(abs(t(array([234], 'f')) - 234.) <= err)
assert_(abs(t(array([234], 'd')) - 234.) <= err)
- if t.__doc__.split()[0] in ['t0', 't4', 's0', 's4']:
+ if tname in ['t0', 't4', 's0', 's4']:
assert_(t(1e200) == t(1e300)) # inf
#assert_raises(ValueError, t, array([234], 'S1'))
@@ -90,10 +86,9 @@ end interface
end python module c_ext_return_real
"""
- @pytest.mark.slow
@pytest.mark.parametrize('name', 't4,t8,s4,s8'.split(','))
def test_all(self, name):
- self.check_function(getattr(self.module, name))
+ self.check_function(getattr(self.module, name), name)
class TestF77ReturnReal(TestReturnReal):
@@ -145,10 +140,9 @@ cf2py intent(out) td
end
"""
- @pytest.mark.slow
@pytest.mark.parametrize('name', 't0,t4,t8,td,s0,s4,s8,sd'.split(','))
def test_all(self, name):
- self.check_function(getattr(self.module, name))
+ self.check_function(getattr(self.module, name), name)
class TestF90ReturnReal(TestReturnReal):
@@ -204,7 +198,6 @@ module f90_return_real
end module f90_return_real
"""
- @pytest.mark.slow
@pytest.mark.parametrize('name', 't0,t4,t8,td,s0,s4,s8,sd'.split(','))
def test_all(self, name):
- self.check_function(getattr(self.module.f90_return_real, name))
+ self.check_function(getattr(self.module.f90_return_real, name), name)
diff --git a/numpy/f2py/tests/test_semicolon_split.py b/numpy/f2py/tests/test_semicolon_split.py
index bcd18c893..d8b4bf222 100644
--- a/numpy/f2py/tests/test_semicolon_split.py
+++ b/numpy/f2py/tests/test_semicolon_split.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import platform
import pytest
diff --git a/numpy/f2py/tests/test_size.py b/numpy/f2py/tests/test_size.py
index e2af61804..b609fa77f 100644
--- a/numpy/f2py/tests/test_size.py
+++ b/numpy/f2py/tests/test_size.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import os
import pytest
diff --git a/numpy/f2py/tests/test_string.py b/numpy/f2py/tests/test_string.py
index 0493c99cf..e3ec96af9 100644
--- a/numpy/f2py/tests/test_string.py
+++ b/numpy/f2py/tests/test_string.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import os
import pytest
diff --git a/numpy/f2py/tests/util.py b/numpy/f2py/tests/util.py
index 77cb612d0..c5b06697d 100644
--- a/numpy/f2py/tests/util.py
+++ b/numpy/f2py/tests/util.py
@@ -5,8 +5,6 @@ Utility functions for
- detecting if compilers are present
"""
-from __future__ import division, absolute_import, print_function
-
import os
import sys
import subprocess
@@ -21,10 +19,7 @@ from numpy.compat import asbytes, asstr
from numpy.testing import temppath
from importlib import import_module
-try:
- from hashlib import md5
-except ImportError:
- from md5 import new as md5 # noqa: F401
+from hashlib import md5
#
# Maintaining a temporary module directory
@@ -107,6 +102,7 @@ def build_module(source_files, options=[], skip=[], only=[], module_name=None):
# Copy files
dst_sources = []
+ f2py_sources = []
for fn in source_files:
if not os.path.isfile(fn):
raise RuntimeError("%s is not a file" % fn)
@@ -114,16 +110,14 @@ def build_module(source_files, options=[], skip=[], only=[], module_name=None):
shutil.copyfile(fn, dst)
dst_sources.append(dst)
- fn = os.path.join(os.path.dirname(fn), '.f2py_f2cmap')
- if os.path.isfile(fn):
- dst = os.path.join(d, os.path.basename(fn))
- if not os.path.isfile(dst):
- shutil.copyfile(fn, dst)
+ base, ext = os.path.splitext(dst)
+ if ext in ('.f90', '.f', '.c', '.pyf'):
+ f2py_sources.append(dst)
# Prepare options
if module_name is None:
module_name = get_temp_module_name()
- f2py_opts = ['-c', '-m', module_name] + options + dst_sources
+ f2py_opts = ['-c', '-m', module_name] + options + f2py_sources
if skip:
f2py_opts += ['skip:'] + skip
if only:
@@ -205,14 +199,20 @@ def _get_compiler_status():
""")
code = code % dict(syspath=repr(sys.path))
- with temppath(suffix='.py') as script:
+ tmpdir = tempfile.mkdtemp()
+ try:
+ script = os.path.join(tmpdir, 'setup.py')
+
with open(script, 'w') as f:
f.write(code)
- cmd = [sys.executable, script, 'config']
+ cmd = [sys.executable, 'setup.py', 'config']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
+ stderr=subprocess.STDOUT,
+ cwd=tmpdir)
out, err = p.communicate()
+ finally:
+ shutil.rmtree(tmpdir)
m = re.search(br'COMPILERS:(\d+),(\d+),(\d+)', out)
if m:
@@ -279,9 +279,8 @@ def build_module_distutils(source_files, config_code, module_name, **kw):
script = os.path.join(d, get_temp_module_name() + '.py')
dst_sources.append(script)
- f = open(script, 'wb')
- f.write(asbytes(code))
- f.close()
+ with open(script, 'wb') as f:
+ f.write(asbytes(code))
# Build
cwd = os.getcwd()
@@ -310,7 +309,7 @@ def build_module_distutils(source_files, config_code, module_name, **kw):
#
-class F2PyTest(object):
+class F2PyTest:
code = None
sources = None
options = []
diff --git a/numpy/f2py/use_rules.py b/numpy/f2py/use_rules.py
index 6f44f1634..f1b71e83c 100644
--- a/numpy/f2py/use_rules.py
+++ b/numpy/f2py/use_rules.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
"""
Build 'use others module data' mechanism for f2py2e.
@@ -15,8 +15,6 @@ $Date: 2000/09/10 12:35:43 $
Pearu Peterson
"""
-from __future__ import division, absolute_import, print_function
-
__version__ = "$Revision: 1.3 $"[10:-1]
f2py_version = 'See `f2py -v`'
diff --git a/numpy/fft/__init__.py b/numpy/fft/__init__.py
index fe95d8b17..36cfe81b3 100644
--- a/numpy/fft/__init__.py
+++ b/numpy/fft/__init__.py
@@ -118,8 +118,16 @@ The inverse DFT is defined as
It differs from the forward transform by the sign of the exponential
argument and the default normalization by :math:`1/n`.
+Type Promotion
+--------------
+
+`numpy.fft` promotes ``float32`` and ``complex64`` arrays to ``float64`` and
+``complex128`` arrays respectively. For an FFT implementation that does not
+promote input arrays, see `scipy.fftpack`.
+
Normalization
-------------
+
The default normalization has the direct transforms unscaled and the inverse
transforms are scaled by :math:`1/n`. It is possible to obtain unitary
transforms by setting the keyword argument ``norm`` to ``"ortho"`` (default is
@@ -183,8 +191,6 @@ For examples, see the various functions.
"""
-from __future__ import division, absolute_import, print_function
-
from ._pocketfft import *
from .helper import *
diff --git a/numpy/fft/_pocketfft.c b/numpy/fft/_pocketfft.c
index d75b9983c..764116a84 100644
--- a/numpy/fft/_pocketfft.c
+++ b/numpy/fft/_pocketfft.c
@@ -10,6 +10,11 @@
* \author Martin Reinecke
*/
+#define NPY_NO_DEPRECATED_API NPY_API_VERSION
+
+#include "Python.h"
+#include "numpy/arrayobject.h"
+
#include <math.h>
#include <string.h>
#include <stdlib.h>
@@ -2184,11 +2189,6 @@ WARN_UNUSED_RESULT static int rfft_forward(rfft_plan plan, double c[], double fc
return rfftblue_forward(plan->blueplan,c,fct);
}
-#define NPY_NO_DEPRECATED_API NPY_API_VERSION
-
-#include "Python.h"
-#include "numpy/arrayobject.h"
-
static PyObject *
execute_complex(PyObject *a1, int is_forward, double fct)
{
@@ -2359,7 +2359,6 @@ static struct PyMethodDef methods[] = {
{NULL, NULL, 0, NULL} /* sentinel */
};
-#if PY_MAJOR_VERSION >= 3
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
"_pocketfft_internal",
@@ -2371,30 +2370,14 @@ static struct PyModuleDef moduledef = {
NULL,
NULL
};
-#endif
/* Initialization function for the module */
-#if PY_MAJOR_VERSION >= 3
-#define RETVAL(x) x
PyMODINIT_FUNC PyInit__pocketfft_internal(void)
-#else
-#define RETVAL(x)
-PyMODINIT_FUNC
-init_pocketfft_internal(void)
-#endif
{
PyObject *m;
-#if PY_MAJOR_VERSION >= 3
m = PyModule_Create(&moduledef);
-#else
- static const char module_documentation[] = "";
-
- m = Py_InitModule4("_pocketfft_internal", methods,
- module_documentation,
- (PyObject*)NULL,PYTHON_API_VERSION);
-#endif
if (m == NULL) {
- return RETVAL(NULL);
+ return NULL;
}
/* Import the array object */
@@ -2402,5 +2385,5 @@ init_pocketfft_internal(void)
/* XXXX Add constants here */
- return RETVAL(m);
+ return m;
}
diff --git a/numpy/fft/_pocketfft.py b/numpy/fft/_pocketfft.py
index 50720cda4..3eab242e5 100644
--- a/numpy/fft/_pocketfft.py
+++ b/numpy/fft/_pocketfft.py
@@ -27,8 +27,6 @@ n = n-dimensional transform
behavior.)
"""
-from __future__ import division, absolute_import, print_function
-
__all__ = ['fft', 'ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn',
'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn']
@@ -61,12 +59,11 @@ def _raw_fft(a, n, axis, is_real, is_forward, inv_norm):
if a.shape[axis] != n:
s = list(a.shape)
+ index = [slice(None)]*len(s)
if s[axis] > n:
- index = [slice(None)]*len(s)
index[axis] = slice(0, n)
a = a[tuple(index)]
else:
- index = [slice(None)]*len(s)
index[axis] = slice(0, s[axis])
s[axis] = n
z = zeros(s, a.dtype.char)
diff --git a/numpy/fft/helper.py b/numpy/fft/helper.py
index a920a4ac0..3dacd9ee1 100644
--- a/numpy/fft/helper.py
+++ b/numpy/fft/helper.py
@@ -2,8 +2,6 @@
Discrete Fourier Transforms - helper.py
"""
-from __future__ import division, absolute_import, print_function
-
from numpy.compat import integer_types
from numpy.core import integer, empty, arange, asarray, roll
from numpy.core.overrides import array_function_dispatch, set_module
diff --git a/numpy/fft/setup.py b/numpy/fft/setup.py
index 8c3a31557..40d632ec5 100644
--- a/numpy/fft/setup.py
+++ b/numpy/fft/setup.py
@@ -1,5 +1,3 @@
-from __future__ import division, print_function
-
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
diff --git a/numpy/fft/tests/test_helper.py b/numpy/fft/tests/test_helper.py
index 6613c8002..68f5990af 100644
--- a/numpy/fft/tests/test_helper.py
+++ b/numpy/fft/tests/test_helper.py
@@ -3,13 +3,12 @@
Copied from fftpack.helper by Pearu Peterson, October 2005
"""
-from __future__ import division, absolute_import, print_function
import numpy as np
-from numpy.testing import assert_array_almost_equal, assert_equal
+from numpy.testing import assert_array_almost_equal
from numpy import fft, pi
-class TestFFTShift(object):
+class TestFFTShift:
def test_definition(self):
x = [0, 1, 2, 3, 4, -4, -3, -2, -1]
@@ -135,7 +134,7 @@ class TestFFTShift(object):
original_ifftshift(inp, axes_keyword))
-class TestFFTFreq(object):
+class TestFFTFreq:
def test_definition(self):
x = [0, 1, 2, 3, 4, -4, -3, -2, -1]
@@ -146,7 +145,7 @@ class TestFFTFreq(object):
assert_array_almost_equal(10*pi*fft.fftfreq(10, pi), x)
-class TestRFFTFreq(object):
+class TestRFFTFreq:
def test_definition(self):
x = [0, 1, 2, 3, 4]
@@ -157,7 +156,7 @@ class TestRFFTFreq(object):
assert_array_almost_equal(10*pi*fft.rfftfreq(10, pi), x)
-class TestIRFFTN(object):
+class TestIRFFTN:
def test_not_last_axis_success(self):
ar, ai = np.random.random((2, 16, 8, 32))
diff --git a/numpy/fft/tests/test_pocketfft.py b/numpy/fft/tests/test_pocketfft.py
index 453e964fa..7c3db0485 100644
--- a/numpy/fft/tests/test_pocketfft.py
+++ b/numpy/fft/tests/test_pocketfft.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import numpy as np
import pytest
from numpy.random import random
@@ -7,11 +5,7 @@ from numpy.testing import (
assert_array_equal, assert_raises, assert_allclose
)
import threading
-import sys
-if sys.version_info[0] >= 3:
- import queue
-else:
- import Queue as queue
+import queue
def fft1(x):
@@ -21,13 +15,13 @@ def fft1(x):
return np.sum(x*np.exp(phase), axis=1)
-class TestFFTShift(object):
+class TestFFTShift:
def test_fft_n(self):
assert_raises(ValueError, np.fft.fft, [1, 2, 3], 0)
-class TestFFT1D(object):
+class TestFFT1D:
def test_identity(self):
maxlen = 512
@@ -222,7 +216,7 @@ def test_fft_with_order(dtype, order, fft):
raise ValueError()
-class TestFFTThreadSafe(object):
+class TestFFTThreadSafe:
threads = 16
input_shape = (800, 200)
diff --git a/numpy/lib/__init__.py b/numpy/lib/__init__.py
index 2db12d9a4..cb0de0d15 100644
--- a/numpy/lib/__init__.py
+++ b/numpy/lib/__init__.py
@@ -11,8 +11,6 @@ Most contains basic functions that are used by several submodules and are
useful to have in the main name-space.
"""
-from __future__ import division, absolute_import, print_function
-
import math
from numpy.version import version as __version__
diff --git a/numpy/lib/_datasource.py b/numpy/lib/_datasource.py
index 0d71375c2..f5d0cc217 100644
--- a/numpy/lib/_datasource.py
+++ b/numpy/lib/_datasource.py
@@ -34,11 +34,7 @@ Example::
>>> fp.close() # doctest: +SKIP
"""
-from __future__ import division, absolute_import, print_function
-
import os
-import sys
-import warnings
import shutil
import io
from contextlib import closing
@@ -72,76 +68,12 @@ def _check_mode(mode, encoding, newline):
raise ValueError("Argument 'newline' not supported in binary mode")
-def _python2_bz2open(fn, mode, encoding, newline):
- """Wrapper to open bz2 in text mode.
-
- Parameters
- ----------
- fn : str
- File name
- mode : {'r', 'w'}
- File mode. Note that bz2 Text files are not supported.
- encoding : str
- Ignored, text bz2 files not supported in Python2.
- newline : str
- Ignored, text bz2 files not supported in Python2.
- """
- import bz2
-
- _check_mode(mode, encoding, newline)
-
- if "t" in mode:
- # BZ2File is missing necessary functions for TextIOWrapper
- warnings.warn("Assuming latin1 encoding for bz2 text file in Python2",
- RuntimeWarning, stacklevel=5)
- mode = mode.replace("t", "")
- return bz2.BZ2File(fn, mode)
-
-def _python2_gzipopen(fn, mode, encoding, newline):
- """ Wrapper to open gzip in text mode.
-
- Parameters
- ----------
- fn : str, bytes, file
- File path or opened file.
- mode : str
- File mode. The actual files are opened as binary, but will decoded
- using the specified `encoding` and `newline`.
- encoding : str
- Encoding to be used when reading/writing as text.
- newline : str
- Newline to be used when reading/writing as text.
-
- """
- import gzip
- # gzip is lacking read1 needed for TextIOWrapper
- class GzipWrap(gzip.GzipFile):
- def read1(self, n):
- return self.read(n)
-
- _check_mode(mode, encoding, newline)
-
- gz_mode = mode.replace("t", "")
-
- if isinstance(fn, (str, bytes)):
- binary_file = GzipWrap(fn, gz_mode)
- elif hasattr(fn, "read") or hasattr(fn, "write"):
- binary_file = GzipWrap(None, gz_mode, fileobj=fn)
- else:
- raise TypeError("filename must be a str or bytes object, or a file")
-
- if "t" in mode:
- return io.TextIOWrapper(binary_file, encoding, newline=newline)
- else:
- return binary_file
-
-
# Using a class instead of a module-level dictionary
# to reduce the initial 'import numpy' overhead by
# deferring the import of lzma, bz2 and gzip until needed
# TODO: .zip support, .tar support?
-class _FileOpeners(object):
+class _FileOpeners:
"""
Container for different methods to open (un-)compressed files.
@@ -176,19 +108,13 @@ class _FileOpeners(object):
try:
import bz2
- if sys.version_info[0] >= 3:
- self._file_openers[".bz2"] = bz2.open
- else:
- self._file_openers[".bz2"] = _python2_bz2open
+ self._file_openers[".bz2"] = bz2.open
except ImportError:
pass
try:
import gzip
- if sys.version_info[0] >= 3:
- self._file_openers[".gz"] = gzip.open
- else:
- self._file_openers[".gz"] = _python2_gzipopen
+ self._file_openers[".gz"] = gzip.open
except ImportError:
pass
@@ -270,7 +196,7 @@ def open(path, mode='r', destpath=os.curdir, encoding=None, newline=None):
@set_module('numpy')
-class DataSource(object):
+class DataSource:
"""
DataSource(destpath='.')
@@ -377,10 +303,7 @@ class DataSource(object):
"""Test if path is a net location. Tests the scheme and netloc."""
# We do this here to reduce the 'import numpy' initial import time.
- if sys.version_info[0] >= 3:
- from urllib.parse import urlparse
- else:
- from urlparse import urlparse
+ from urllib.parse import urlparse
# BUG : URLs require a scheme string ('http://') to be used.
# www.google.com will fail.
@@ -397,14 +320,10 @@ class DataSource(object):
Creates a copy of the file in the datasource cache.
"""
- # We import these here because importing urllib2 is slow and
+ # We import these here because importing urllib is slow and
# a significant fraction of numpy's total import time.
- if sys.version_info[0] >= 3:
- from urllib.request import urlopen
- from urllib.error import URLError
- else:
- from urllib2 import urlopen
- from urllib2 import URLError
+ from urllib.request import urlopen
+ from urllib.error import URLError
upath = self.abspath(path)
@@ -479,10 +398,7 @@ class DataSource(object):
"""
# We do this here to reduce the 'import numpy' initial import time.
- if sys.version_info[0] >= 3:
- from urllib.parse import urlparse
- else:
- from urlparse import urlparse
+ from urllib.parse import urlparse
# TODO: This should be more robust. Handles case where path includes
# the destpath, but not other sub-paths. Failing case:
@@ -549,14 +465,10 @@ class DataSource(object):
if os.path.exists(path):
return True
- # We import this here because importing urllib2 is slow and
+ # We import this here because importing urllib is slow and
# a significant fraction of numpy's total import time.
- if sys.version_info[0] >= 3:
- from urllib.request import urlopen
- from urllib.error import URLError
- else:
- from urllib2 import urlopen
- from urllib2 import URLError
+ from urllib.request import urlopen
+ from urllib.error import URLError
# Test cached url
upath = self.abspath(path)
diff --git a/numpy/lib/_iotools.py b/numpy/lib/_iotools.py
index c392929fd..ff5b94342 100644
--- a/numpy/lib/_iotools.py
+++ b/numpy/lib/_iotools.py
@@ -1,20 +1,11 @@
"""A collection of functions designed to help I/O with ascii files.
"""
-from __future__ import division, absolute_import, print_function
-
__docformat__ = "restructuredtext en"
-import sys
import numpy as np
import numpy.core.numeric as nx
-from numpy.compat import asbytes, asunicode, bytes, basestring
-
-if sys.version_info[0] >= 3:
- from builtins import bool, int, float, complex, object, str
- unicode = str
-else:
- from __builtin__ import bool, int, float, complex, object, unicode, str
+from numpy.compat import asbytes, asunicode, bytes
def _decode_line(line, encoding=None):
@@ -65,40 +56,6 @@ def _is_bytes_like(obj):
return True
-def _to_filehandle(fname, flag='r', return_opened=False):
- """
- Returns the filehandle corresponding to a string or a file.
- If the string ends in '.gz', the file is automatically unzipped.
-
- Parameters
- ----------
- fname : string, filehandle
- Name of the file whose filehandle must be returned.
- flag : string, optional
- Flag indicating the status of the file ('r' for read, 'w' for write).
- return_opened : boolean, optional
- Whether to return the opening status of the file.
- """
- if _is_string_like(fname):
- if fname.endswith('.gz'):
- import gzip
- fhd = gzip.open(fname, flag)
- elif fname.endswith('.bz2'):
- import bz2
- fhd = bz2.BZ2File(fname)
- else:
- fhd = file(fname, flag)
- opened = True
- elif hasattr(fname, 'seek'):
- fhd = fname
- opened = False
- else:
- raise ValueError('fname must be a string or file handle')
- if return_opened:
- return fhd, opened
- return fhd
-
-
def has_nested_fields(ndtype):
"""
Returns whether one or several fields of a dtype are nested.
@@ -173,7 +130,7 @@ def flatten_dtype(ndtype, flatten_base=False):
return types
-class LineSplitter(object):
+class LineSplitter:
"""
Object to split a string at a given delimiter or at given places.
@@ -210,14 +167,15 @@ class LineSplitter(object):
return lambda input: [_.strip() for _ in method(input)]
#
- def __init__(self, delimiter=None, comments='#', autostrip=True, encoding=None):
+ def __init__(self, delimiter=None, comments='#', autostrip=True,
+ encoding=None):
delimiter = _decode_line(delimiter)
comments = _decode_line(comments)
self.comments = comments
# Delimiter is a character
- if (delimiter is None) or isinstance(delimiter, basestring):
+ if (delimiter is None) or isinstance(delimiter, str):
delimiter = delimiter or None
_handyman = self._delimited_splitter
# Delimiter is a list of field widths
@@ -273,7 +231,7 @@ class LineSplitter(object):
return self._handyman(_decode_line(line, self.encoding))
-class NameValidator(object):
+class NameValidator:
"""
Object to validate a list of strings to use as field names.
@@ -387,7 +345,7 @@ class NameValidator(object):
if (nbfields is None):
return None
names = []
- if isinstance(names, basestring):
+ if isinstance(names, str):
names = [names, ]
if nbfields is not None:
nbnames = len(names)
@@ -496,7 +454,7 @@ class ConversionWarning(UserWarning):
pass
-class StringConverter(object):
+class StringConverter:
"""
Factory class for function transforming a string into another object
(int, float).
@@ -546,18 +504,23 @@ class StringConverter(object):
"""
#
_mapper = [(nx.bool_, str2bool, False),
- (nx.integer, int, -1)]
+ (nx.int_, int, -1),]
# On 32-bit systems, we need to make sure that we explicitly include
- # nx.int64 since ns.integer is nx.int32.
- if nx.dtype(nx.integer).itemsize < nx.dtype(nx.int64).itemsize:
+    # nx.int64 since nx.int_ is nx.int32.
+ if nx.dtype(nx.int_).itemsize < nx.dtype(nx.int64).itemsize:
_mapper.append((nx.int64, int, -1))
- _mapper.extend([(nx.floating, float, nx.nan),
- (nx.complexfloating, complex, nx.nan + 0j),
+ _mapper.extend([(nx.float64, float, nx.nan),
+ (nx.complex128, complex, nx.nan + 0j),
(nx.longdouble, nx.longdouble, nx.nan),
(nx.unicode_, asunicode, '???'),
- (nx.string_, asbytes, '???')])
+ (nx.string_, asbytes, '???'),
+ # If a non-default dtype is passed, fall back to generic
+ # ones (should only be used for the converter)
+ (nx.integer, int, -1),
+ (nx.floating, float, nx.nan),
+ (nx.complexfloating, complex, nx.nan + 0j),])
(_defaulttype, _defaultfunc, _defaultfill) = zip(*_mapper)
@@ -701,7 +664,7 @@ class StringConverter(object):
if missing_values is None:
self.missing_values = {''}
else:
- if isinstance(missing_values, basestring):
+ if isinstance(missing_values, str):
missing_values = missing_values.split(",")
self.missing_values = set(list(missing_values) + [''])
#
@@ -748,6 +711,26 @@ class StringConverter(object):
return self._callingfunction(value)
#
+ def _do_upgrade(self):
+ # Raise an exception if we locked the converter...
+ if self._locked:
+ errmsg = "Converter is locked and cannot be upgraded"
+ raise ConverterLockError(errmsg)
+ _statusmax = len(self._mapper)
+ # Complains if we try to upgrade by the maximum
+ _status = self._status
+ if _status == _statusmax:
+ errmsg = "Could not find a valid conversion function"
+ raise ConverterError(errmsg)
+ elif _status < _statusmax - 1:
+ _status += 1
+ self.type, self.func, default = self._mapper[_status]
+ self._status = _status
+ if self._initial_default is not None:
+ self.default = self._initial_default
+ else:
+ self.default = default
+
def upgrade(self, value):
"""
Find the best converter for a given string, and return the result.
@@ -773,24 +756,7 @@ class StringConverter(object):
try:
return self._strict_call(value)
except ValueError:
- # Raise an exception if we locked the converter...
- if self._locked:
- errmsg = "Converter is locked and cannot be upgraded"
- raise ConverterLockError(errmsg)
- _statusmax = len(self._mapper)
- # Complains if we try to upgrade by the maximum
- _status = self._status
- if _status == _statusmax:
- errmsg = "Could not find a valid conversion function"
- raise ConverterError(errmsg)
- elif _status < _statusmax - 1:
- _status += 1
- (self.type, self.func, default) = self._mapper[_status]
- self._status = _status
- if self._initial_default is not None:
- self.default = self._initial_default
- else:
- self.default = default
+ self._do_upgrade()
return self.upgrade(value)
def iterupgrade(self, value):
@@ -802,25 +768,7 @@ class StringConverter(object):
for _m in value:
_strict_call(_m)
except ValueError:
- # Raise an exception if we locked the converter...
- if self._locked:
- errmsg = "Converter is locked and cannot be upgraded"
- raise ConverterLockError(errmsg)
- _statusmax = len(self._mapper)
- # Complains if we try to upgrade by the maximum
- _status = self._status
- if _status == _statusmax:
- raise ConverterError(
- "Could not find a valid conversion function"
- )
- elif _status < _statusmax - 1:
- _status += 1
- (self.type, self.func, default) = self._mapper[_status]
- if self._initial_default is not None:
- self.default = self._initial_default
- else:
- self.default = default
- self._status = _status
+ self._do_upgrade()
self.iterupgrade(value)
def update(self, func, default=None, testing_value=None,
@@ -876,7 +824,7 @@ class StringConverter(object):
else:
if not np.iterable(missing_values):
missing_values = [missing_values]
- if not all(isinstance(v, basestring) for v in missing_values):
+ if not all(isinstance(v, str) for v in missing_values):
raise TypeError("missing_values must be strings or unicode")
self.missing_values.update(missing_values)
@@ -926,7 +874,7 @@ def easy_dtype(ndtype, names=None, defaultfmt="f%i", **validationargs):
nbfields = len(ndtype)
if names is None:
names = [''] * len(ndtype)
- elif isinstance(names, basestring):
+ elif isinstance(names, str):
names = names.split(",")
names = validate(names, nbfields=nbfields, defaultfmt=defaultfmt)
ndtype = np.dtype(dict(formats=ndtype, names=names))
@@ -934,7 +882,7 @@ def easy_dtype(ndtype, names=None, defaultfmt="f%i", **validationargs):
# Explicit names
if names is not None:
validate = NameValidator(**validationargs)
- if isinstance(names, basestring):
+ if isinstance(names, str):
names = names.split(",")
# Simple dtype: repeat to match the nb of names
if ndtype.names is None:
@@ -949,9 +897,10 @@ def easy_dtype(ndtype, names=None, defaultfmt="f%i", **validationargs):
elif ndtype.names is not None:
validate = NameValidator(**validationargs)
# Default initial names : should we change the format ?
- if ((ndtype.names == tuple("f%i" % i for i in range(len(ndtype.names)))) and
- (defaultfmt != "f%i")):
- ndtype.names = validate([''] * len(ndtype.names), defaultfmt=defaultfmt)
+ numbered_names = tuple("f%i" % i for i in range(len(ndtype.names)))
+ if ((ndtype.names == numbered_names) and (defaultfmt != "f%i")):
+ ndtype.names = validate([''] * len(ndtype.names),
+ defaultfmt=defaultfmt)
# Explicit initial names : just validate
else:
ndtype.names = validate(ndtype.names, defaultfmt=defaultfmt)
diff --git a/numpy/lib/_version.py b/numpy/lib/_version.py
index 8aa999fc9..d4098acb5 100644
--- a/numpy/lib/_version.py
+++ b/numpy/lib/_version.py
@@ -5,12 +5,8 @@ The LooseVersion and StrictVersion classes that distutils provides don't
work; they don't recognize anything like alpha/beta/rc/dev versions.
"""
-from __future__ import division, absolute_import, print_function
-
import re
-from numpy.compat import basestring
-
__all__ = ['NumpyVersion']
@@ -116,10 +112,10 @@ class NumpyVersion():
return vercmp
def _compare(self, other):
- if not isinstance(other, (basestring, NumpyVersion)):
+ if not isinstance(other, (str, NumpyVersion)):
raise ValueError("Invalid object to compare with NumpyVersion.")
- if isinstance(other, basestring):
+ if isinstance(other, str):
other = NumpyVersion(other)
vercmp = self._compare_version(other)
diff --git a/numpy/lib/arraypad.py b/numpy/lib/arraypad.py
index 33e64708d..7569e7651 100644
--- a/numpy/lib/arraypad.py
+++ b/numpy/lib/arraypad.py
@@ -3,8 +3,6 @@ The arraypad module contains a group of functions to pad values onto the edges
of an n-dimensional array.
"""
-from __future__ import division, absolute_import, print_function
-
import numpy as np
from numpy.core.overrides import array_function_dispatch
from numpy.lib.index_tricks import ndindex
@@ -234,7 +232,7 @@ def _get_linear_ramps(padded, axis, width_pair, end_value_pair):
def _get_stats(padded, axis, width_pair, length_pair, stat_func):
"""
- Calculate statistic for the empty-padded array in given dimnsion.
+ Calculate statistic for the empty-padded array in given dimension.
Parameters
----------
@@ -273,7 +271,7 @@ def _get_stats(padded, axis, width_pair, length_pair, stat_func):
if (left_length == 0 or right_length == 0) \
and stat_func in {np.amax, np.amin}:
- # amax and amin can't operate on an emtpy array,
+ # amax and amin can't operate on an empty array,
# raise a more descriptive warning here instead of the default one
raise ValueError("stat_length of 0 yields no value for padding")
diff --git a/numpy/lib/arraysetops.py b/numpy/lib/arraysetops.py
index 2309f7e42..22687b941 100644
--- a/numpy/lib/arraysetops.py
+++ b/numpy/lib/arraysetops.py
@@ -25,8 +25,6 @@ To do: Optionally return indices analogously to unique for all functions.
:Author: Robert Cimrman
"""
-from __future__ import division, absolute_import, print_function
-
import functools
import numpy as np
@@ -94,8 +92,7 @@ def ediff1d(ary, to_end=None, to_begin=None):
# force a 1d array
ary = np.asanyarray(ary).ravel()
- # enforce propagation of the dtype of input
- # ary to returned result
+ # enforce that the dtype of `ary` is used for the output
dtype_req = ary.dtype
# fast track default case
@@ -105,22 +102,23 @@ def ediff1d(ary, to_end=None, to_begin=None):
if to_begin is None:
l_begin = 0
else:
- _to_begin = np.asanyarray(to_begin, dtype=dtype_req)
- if not np.all(_to_begin == to_begin):
- raise ValueError("cannot convert 'to_begin' to array with dtype "
- "'%r' as required for input ary" % dtype_req)
- to_begin = _to_begin.ravel()
+ to_begin = np.asanyarray(to_begin)
+ if not np.can_cast(to_begin, dtype_req, casting="same_kind"):
+            raise TypeError("dtype of `to_begin` must be compatible "
+ "with input `ary` under the `same_kind` rule.")
+
+ to_begin = to_begin.ravel()
l_begin = len(to_begin)
if to_end is None:
l_end = 0
else:
- _to_end = np.asanyarray(to_end, dtype=dtype_req)
- # check that casting has not overflowed
- if not np.all(_to_end == to_end):
- raise ValueError("cannot convert 'to_end' to array with dtype "
- "'%r' as required for input ary" % dtype_req)
- to_end = _to_end.ravel()
+ to_end = np.asanyarray(to_end)
+ if not np.can_cast(to_end, dtype_req, casting="same_kind"):
+ raise TypeError("dtype of `to_end` must be compatible "
+ "with input `ary` under the `same_kind` rule.")
+
+ to_end = to_end.ravel()
l_end = len(to_end)
# do the calculation in place and copy to_begin and to_end
@@ -253,9 +251,9 @@ def unique(ar, return_index=False, return_inverse=False,
>>> u
array([1, 2, 3, 4, 6])
>>> indices
- array([0, 1, 4, ..., 1, 2, 1])
+ array([0, 1, 4, 3, 1, 2, 1])
>>> u[indices]
- array([1, 2, 6, ..., 2, 3, 2])
+ array([1, 2, 6, 4, 2, 3, 2])
"""
ar = np.asanyarray(ar)
@@ -272,20 +270,33 @@ def unique(ar, return_index=False, return_inverse=False,
# Must reshape to a contiguous 2D array for this to work...
orig_shape, orig_dtype = ar.shape, ar.dtype
- ar = ar.reshape(orig_shape[0], -1)
+ ar = ar.reshape(orig_shape[0], np.prod(orig_shape[1:], dtype=np.intp))
ar = np.ascontiguousarray(ar)
dtype = [('f{i}'.format(i=i), ar.dtype) for i in range(ar.shape[1])]
+ # At this point, `ar` has shape `(n, m)`, and `dtype` is a structured
+ # data type with `m` fields where each field has the data type of `ar`.
+ # In the following, we create the array `consolidated`, which has
+ # shape `(n,)` with data type `dtype`.
try:
- consolidated = ar.view(dtype)
+ if ar.shape[1] > 0:
+ consolidated = ar.view(dtype)
+ else:
+ # If ar.shape[1] == 0, then dtype will be `np.dtype([])`, which is
+ # a data type with itemsize 0, and the call `ar.view(dtype)` will
+ # fail. Instead, we'll use `np.empty` to explicitly create the
+ # array with shape `(len(ar),)`. Because `dtype` in this case has
+ # itemsize 0, the total size of the result is still 0 bytes.
+ consolidated = np.empty(len(ar), dtype=dtype)
except TypeError:
# There's no good way to do this for object arrays, etc...
msg = 'The axis argument to unique is not supported for dtype {dt}'
raise TypeError(msg.format(dt=ar.dtype))
def reshape_uniq(uniq):
+ n = len(uniq)
uniq = uniq.view(orig_dtype)
- uniq = uniq.reshape(-1, *orig_shape[1:])
+ uniq = uniq.reshape(n, *orig_shape[1:])
uniq = np.moveaxis(uniq, 0, axis)
return uniq
@@ -785,4 +796,3 @@ def setdiff1d(ar1, ar2, assume_unique=False):
ar1 = unique(ar1)
ar2 = unique(ar2)
return ar1[in1d(ar1, ar2, assume_unique=True, invert=True)]
-
diff --git a/numpy/lib/arrayterator.py b/numpy/lib/arrayterator.py
index c16668582..b9ea21f8e 100644
--- a/numpy/lib/arrayterator.py
+++ b/numpy/lib/arrayterator.py
@@ -7,17 +7,13 @@ an array object, and when iterated it will return sub-arrays with at most
a user-specified number of elements.
"""
-from __future__ import division, absolute_import, print_function
-
from operator import mul
from functools import reduce
-from numpy.compat import long
-
__all__ = ['Arrayterator']
-class Arrayterator(object):
+class Arrayterator:
"""
Buffered iterator for big arrays.
@@ -110,7 +106,7 @@ class Arrayterator(object):
if slice_ is Ellipsis:
fixed.extend([slice(None)] * (dims-length+1))
length = len(fixed)
- elif isinstance(slice_, (int, long)):
+ elif isinstance(slice_, int):
fixed.append(slice(slice_, slice_+1, 1))
else:
fixed.append(slice_)
@@ -163,8 +159,7 @@ class Arrayterator(object):
"""
for block in self:
- for value in block.flat:
- yield value
+ yield from block.flat
@property
def shape(self):
diff --git a/numpy/lib/financial.py b/numpy/lib/financial.py
index d72384e99..b055bb1ec 100644
--- a/numpy/lib/financial.py
+++ b/numpy/lib/financial.py
@@ -10,8 +10,7 @@ or arrays (or other sequences).
Functions support the :class:`decimal.Decimal` type unless
otherwise stated.
"""
-from __future__ import division, absolute_import, print_function
-
+import warnings
from decimal import Decimal
import functools
@@ -19,6 +18,10 @@ import numpy as np
from numpy.core import overrides
+_depmsg = ("numpy.{name} is deprecated and will be removed from NumPy 1.20. "
+ "Use numpy_financial.{name} instead "
+ "(https://pypi.org/project/numpy-financial/).")
+
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
@@ -45,6 +48,8 @@ def _convert_when(when):
def _fv_dispatcher(rate, nper, pmt, pv, when=None):
+ warnings.warn(_depmsg.format(name='fv'),
+ DeprecationWarning, stacklevel=3)
return (rate, nper, pmt, pv)
@@ -53,6 +58,12 @@ def fv(rate, nper, pmt, pv, when='end'):
"""
Compute the future value.
+ .. deprecated:: 1.18
+
+ `fv` is deprecated; for details, see NEP 32 [1]_.
+ Use the corresponding function in the numpy-financial library,
+ https://pypi.org/project/numpy-financial.
+
Given:
* a present value, `pv`
* an interest `rate` compounded once per period, of which
@@ -100,7 +111,9 @@ def fv(rate, nper, pmt, pv, when='end'):
References
----------
- .. [WRW] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May).
+ .. [1] NumPy Enhancement Proposal (NEP) 32,
+ https://numpy.org/neps/nep-0032-remove-financial-functions.html
+ .. [2] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May).
Open Document Format for Office Applications (OpenDocument)v1.2,
Part 2: Recalculated Formula (OpenFormula) Format - Annotated Version,
Pre-Draft 12. Organization for the Advancement of Structured Information
@@ -109,6 +122,7 @@ def fv(rate, nper, pmt, pv, when='end'):
http://www.oasis-open.org/committees/documents.php?wg_abbrev=office-formula
OpenDocument-formula-20090508.odt
+
Examples
--------
What is the future value after 10 years of saving $100 now, with
@@ -139,6 +153,8 @@ def fv(rate, nper, pmt, pv, when='end'):
def _pmt_dispatcher(rate, nper, pv, fv=None, when=None):
+ warnings.warn(_depmsg.format(name='pmt'),
+ DeprecationWarning, stacklevel=3)
return (rate, nper, pv, fv)
@@ -147,6 +163,12 @@ def pmt(rate, nper, pv, fv=0, when='end'):
"""
Compute the payment against loan principal plus interest.
+ .. deprecated:: 1.18
+
+ `pmt` is deprecated; for details, see NEP 32 [1]_.
+ Use the corresponding function in the numpy-financial library,
+ https://pypi.org/project/numpy-financial.
+
Given:
* a present value, `pv` (e.g., an amount borrowed)
* a future value, `fv` (e.g., 0)
@@ -204,7 +226,9 @@ def pmt(rate, nper, pv, fv=0, when='end'):
References
----------
- .. [WRW] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May).
+ .. [1] NumPy Enhancement Proposal (NEP) 32,
+ https://numpy.org/neps/nep-0032-remove-financial-functions.html
+ .. [2] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May).
Open Document Format for Office Applications (OpenDocument)v1.2,
Part 2: Recalculated Formula (OpenFormula) Format - Annotated Version,
Pre-Draft 12. Organization for the Advancement of Structured Information
@@ -237,6 +261,8 @@ def pmt(rate, nper, pv, fv=0, when='end'):
def _nper_dispatcher(rate, pmt, pv, fv=None, when=None):
+ warnings.warn(_depmsg.format(name='nper'),
+ DeprecationWarning, stacklevel=3)
return (rate, pmt, pv, fv)
@@ -245,6 +271,12 @@ def nper(rate, pmt, pv, fv=0, when='end'):
"""
Compute the number of periodic payments.
+ .. deprecated:: 1.18
+
+ `nper` is deprecated; for details, see NEP 32 [1]_.
+ Use the corresponding function in the numpy-financial library,
+ https://pypi.org/project/numpy-financial.
+
:class:`decimal.Decimal` type is not supported.
Parameters
@@ -270,6 +302,11 @@ def nper(rate, pmt, pv, fv=0, when='end'):
fv + pv + pmt*nper = 0
+ References
+ ----------
+ .. [1] NumPy Enhancement Proposal (NEP) 32,
+ https://numpy.org/neps/nep-0032-remove-financial-functions.html
+
Examples
--------
If you only had $150/month to pay towards the loan, how long would it take
@@ -311,6 +348,8 @@ def nper(rate, pmt, pv, fv=0, when='end'):
def _ipmt_dispatcher(rate, per, nper, pv, fv=None, when=None):
+ warnings.warn(_depmsg.format(name='ipmt'),
+ DeprecationWarning, stacklevel=3)
return (rate, per, nper, pv, fv)
@@ -319,6 +358,12 @@ def ipmt(rate, per, nper, pv, fv=0, when='end'):
"""
Compute the interest portion of a payment.
+ .. deprecated:: 1.18
+
+ `ipmt` is deprecated; for details, see NEP 32 [1]_.
+ Use the corresponding function in the numpy-financial library,
+ https://pypi.org/project/numpy-financial.
+
Parameters
----------
rate : scalar or array_like of shape(M, )
@@ -354,6 +399,11 @@ def ipmt(rate, per, nper, pv, fv=0, when='end'):
``pmt = ppmt + ipmt``
+ References
+ ----------
+ .. [1] NumPy Enhancement Proposal (NEP) 32,
+ https://numpy.org/neps/nep-0032-remove-financial-functions.html
+
Examples
--------
What is the amortization schedule for a 1 year loan of $2500 at
@@ -422,6 +472,8 @@ def _rbl(rate, per, pmt, pv, when):
def _ppmt_dispatcher(rate, per, nper, pv, fv=None, when=None):
+ warnings.warn(_depmsg.format(name='ppmt'),
+ DeprecationWarning, stacklevel=3)
return (rate, per, nper, pv, fv)
@@ -430,6 +482,12 @@ def ppmt(rate, per, nper, pv, fv=0, when='end'):
"""
Compute the payment against loan principal.
+ .. deprecated:: 1.18
+
+ `ppmt` is deprecated; for details, see NEP 32 [1]_.
+ Use the corresponding function in the numpy-financial library,
+ https://pypi.org/project/numpy-financial.
+
Parameters
----------
rate : array_like
@@ -450,12 +508,19 @@ def ppmt(rate, per, nper, pv, fv=0, when='end'):
--------
pmt, pv, ipmt
+ References
+ ----------
+ .. [1] NumPy Enhancement Proposal (NEP) 32,
+ https://numpy.org/neps/nep-0032-remove-financial-functions.html
+
"""
total = pmt(rate, nper, pv, fv, when)
return total - ipmt(rate, per, nper, pv, fv, when)
def _pv_dispatcher(rate, nper, pmt, fv=None, when=None):
+ warnings.warn(_depmsg.format(name='pv'),
+ DeprecationWarning, stacklevel=3)
return (rate, nper, nper, pv, fv)
@@ -464,6 +529,12 @@ def pv(rate, nper, pmt, fv=0, when='end'):
"""
Compute the present value.
+ .. deprecated:: 1.18
+
+ `pv` is deprecated; for details, see NEP 32 [1]_.
+ Use the corresponding function in the numpy-financial library,
+ https://pypi.org/project/numpy-financial.
+
Given:
* a future value, `fv`
* an interest `rate` compounded once per period, of which
@@ -510,7 +581,9 @@ def pv(rate, nper, pmt, fv=0, when='end'):
References
----------
- .. [WRW] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May).
+ .. [1] NumPy Enhancement Proposal (NEP) 32,
+ https://numpy.org/neps/nep-0032-remove-financial-functions.html
+ .. [2] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May).
Open Document Format for Office Applications (OpenDocument)v1.2,
Part 2: Recalculated Formula (OpenFormula) Format - Annotated Version,
Pre-Draft 12. Organization for the Advancement of Structured Information
@@ -567,6 +640,8 @@ def _g_div_gp(r, n, p, x, y, w):
def _rate_dispatcher(nper, pmt, pv, fv, when=None, guess=None, tol=None,
maxiter=None):
+ warnings.warn(_depmsg.format(name='rate'),
+ DeprecationWarning, stacklevel=3)
return (nper, pmt, pv, fv)
@@ -582,6 +657,12 @@ def rate(nper, pmt, pv, fv, when='end', guess=None, tol=None, maxiter=100):
"""
Compute the rate of interest per period.
+ .. deprecated:: 1.18
+
+ `rate` is deprecated; for details, see NEP 32 [1]_.
+ Use the corresponding function in the numpy-financial library,
+ https://pypi.org/project/numpy-financial.
+
Parameters
----------
nper : array_like
@@ -612,13 +693,16 @@ def rate(nper, pmt, pv, fv, when='end', guess=None, tol=None, maxiter=100):
References
----------
- Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May). Open Document
- Format for Office Applications (OpenDocument)v1.2, Part 2: Recalculated
- Formula (OpenFormula) Format - Annotated Version, Pre-Draft 12.
- Organization for the Advancement of Structured Information Standards
- (OASIS). Billerica, MA, USA. [ODT Document]. Available:
- http://www.oasis-open.org/committees/documents.php?wg_abbrev=office-formula
- OpenDocument-formula-20090508.odt
+ .. [1] NumPy Enhancement Proposal (NEP) 32,
+ https://numpy.org/neps/nep-0032-remove-financial-functions.html
+ .. [2] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May).
+ Open Document Format for Office Applications (OpenDocument)v1.2,
+ Part 2: Recalculated Formula (OpenFormula) Format - Annotated Version,
+ Pre-Draft 12. Organization for the Advancement of Structured Information
+ Standards (OASIS). Billerica, MA, USA. [ODT Document].
+ Available:
+ http://www.oasis-open.org/committees/documents.php?wg_abbrev=office-formula
+ OpenDocument-formula-20090508.odt
"""
when = _convert_when(when)
@@ -651,6 +735,8 @@ def rate(nper, pmt, pv, fv, when='end', guess=None, tol=None, maxiter=100):
def _irr_dispatcher(values):
+ warnings.warn(_depmsg.format(name='irr'),
+ DeprecationWarning, stacklevel=3)
return (values,)
@@ -659,6 +745,12 @@ def irr(values):
"""
Return the Internal Rate of Return (IRR).
+ .. deprecated:: 1.18
+
+ `irr` is deprecated; for details, see NEP 32 [1]_.
+ Use the corresponding function in the numpy-financial library,
+ https://pypi.org/project/numpy-financial.
+
This is the "average" periodically compounded rate of return
that gives a net present value of 0.0; for a more complete explanation,
see Notes below.
@@ -693,13 +785,15 @@ def irr(values):
+ \\frac{55}{(1+r)^3} + \\frac{20}{(1+r)^4} = 0
In general, for `values` :math:`= [v_0, v_1, ... v_M]`,
- irr is the solution of the equation: [G]_
+ irr is the solution of the equation: [2]_
.. math:: \\sum_{t=0}^M{\\frac{v_t}{(1+irr)^{t}}} = 0
References
----------
- .. [G] L. J. Gitman, "Principles of Managerial Finance, Brief," 3rd ed.,
+ .. [1] NumPy Enhancement Proposal (NEP) 32,
+ https://numpy.org/neps/nep-0032-remove-financial-functions.html
+ .. [2] L. J. Gitman, "Principles of Managerial Finance, Brief," 3rd ed.,
Addison-Wesley, 2003, pg. 348.
Examples
@@ -734,6 +828,8 @@ def irr(values):
def _npv_dispatcher(rate, values):
+ warnings.warn(_depmsg.format(name='npv'),
+ DeprecationWarning, stacklevel=3)
return (values,)
@@ -742,6 +838,12 @@ def npv(rate, values):
"""
Returns the NPV (Net Present Value) of a cash flow series.
+ .. deprecated:: 1.18
+
+ `npv` is deprecated; for details, see NEP 32 [1]_.
+ Use the corresponding function in the numpy-financial library,
+ https://pypi.org/project/numpy-financial.
+
Parameters
----------
rate : scalar
@@ -772,13 +874,15 @@ def npv(rate, values):
Notes
-----
- Returns the result of: [G]_
+ Returns the result of: [2]_
.. math :: \\sum_{t=0}^{M-1}{\\frac{values_t}{(1+rate)^{t}}}
References
----------
- .. [G] L. J. Gitman, "Principles of Managerial Finance, Brief," 3rd ed.,
+ .. [1] NumPy Enhancement Proposal (NEP) 32,
+ https://numpy.org/neps/nep-0032-remove-financial-functions.html
+ .. [2] L. J. Gitman, "Principles of Managerial Finance, Brief," 3rd ed.,
Addison-Wesley, 2003, pg. 346.
Examples
@@ -808,6 +912,8 @@ def npv(rate, values):
def _mirr_dispatcher(values, finance_rate, reinvest_rate):
+ warnings.warn(_depmsg.format(name='mirr'),
+ DeprecationWarning, stacklevel=3)
return (values,)
@@ -816,6 +922,12 @@ def mirr(values, finance_rate, reinvest_rate):
"""
Modified internal rate of return.
+ .. deprecated:: 1.18
+
+ `mirr` is deprecated; for details, see NEP 32 [1]_.
+ Use the corresponding function in the numpy-financial library,
+ https://pypi.org/project/numpy-financial.
+
Parameters
----------
values : array_like
@@ -832,6 +944,10 @@ def mirr(values, finance_rate, reinvest_rate):
out : float
Modified internal rate of return
+ References
+ ----------
+ .. [1] NumPy Enhancement Proposal (NEP) 32,
+ https://numpy.org/neps/nep-0032-remove-financial-functions.html
"""
values = np.asarray(values)
n = values.size
diff --git a/numpy/lib/format.py b/numpy/lib/format.py
index 1ecd72815..2afa4ac10 100644
--- a/numpy/lib/format.py
+++ b/numpy/lib/format.py
@@ -161,15 +161,12 @@ alternatives, is described in the `"npy-format" NEP
evolved with time and this document is more current.
"""
-from __future__ import division, absolute_import, print_function
-
import numpy
-import sys
import io
import warnings
from numpy.lib.utils import safe_eval
from numpy.compat import (
- isfileobj, long, os_fspath, pickle
+ isfileobj, os_fspath, pickle
)
@@ -215,10 +212,7 @@ def magic(major, minor):
raise ValueError("major version must be 0 <= major < 256")
if minor < 0 or minor > 255:
raise ValueError("minor version must be 0 <= minor < 256")
- if sys.version_info[0] < 3:
- return MAGIC_PREFIX + chr(major) + chr(minor)
- else:
- return MAGIC_PREFIX + bytes([major, minor])
+ return MAGIC_PREFIX + bytes([major, minor])
def read_magic(fp):
""" Read the magic string to get the version of the file format.
@@ -236,12 +230,19 @@ def read_magic(fp):
if magic_str[:-2] != MAGIC_PREFIX:
msg = "the magic string is not correct; expected %r, got %r"
raise ValueError(msg % (MAGIC_PREFIX, magic_str[:-2]))
- if sys.version_info[0] < 3:
- major, minor = map(ord, magic_str[-2:])
- else:
- major, minor = magic_str[-2:]
+ major, minor = magic_str[-2:]
return major, minor
+def _has_metadata(dt):
+ if dt.metadata is not None:
+ return True
+ elif dt.names is not None:
+ return any(_has_metadata(dt[k]) for k in dt.names)
+ elif dt.subdtype is not None:
+ return _has_metadata(dt.base)
+ else:
+ return False
+
def dtype_to_descr(dtype):
"""
Get a serializable descriptor from the dtype.
@@ -265,6 +266,10 @@ def dtype_to_descr(dtype):
replicate the input dtype.
"""
+ if _has_metadata(dtype):
+ warnings.warn("metadata on a dtype may be saved or ignored, but will "
+ "raise if saved when read. Use another form of storage.",
+ UserWarning, stacklevel=2)
if dtype.names is not None:
# This is a record array. The .descr is fine. XXX: parts of the
# record array with an empty name, like padding bytes, still get
@@ -290,7 +295,11 @@ def descr_to_dtype(descr):
# subtype, will always have a shape descr[1]
dt = descr_to_dtype(descr[0])
return numpy.dtype((dt, descr[1]))
- fields = []
+
+ titles = []
+ names = []
+ formats = []
+ offsets = []
offset = 0
for field in descr:
if len(field) == 2:
@@ -304,14 +313,13 @@ def descr_to_dtype(descr):
# Once support for blank names is removed, only "if name == ''" needed)
is_pad = (name == '' and dt.type is numpy.void and dt.names is None)
if not is_pad:
- fields.append((name, dt, offset))
-
+ title, name = name if isinstance(name, tuple) else (None, name)
+ titles.append(title)
+ names.append(name)
+ formats.append(dt)
+ offsets.append(offset)
offset += dt.itemsize
- names, formats, offsets = zip(*fields)
- # names may be (title, names) tuples
- nametups = (n if isinstance(n, tuple) else (None, n) for n in names)
- titles, names = zip(*nametups)
return numpy.dtype({'names': names, 'formats': formats, 'titles': titles,
'offsets': offsets, 'itemsize': offset})
@@ -530,16 +538,11 @@ def _filter_header(s):
"""
import tokenize
- if sys.version_info[0] >= 3:
- from io import StringIO
- else:
- from StringIO import StringIO
+ from io import StringIO
tokens = []
last_token_was_number = False
- # adding newline as python 2.7.5 workaround
- string = s + "\n"
- for token in tokenize.generate_tokens(StringIO(string).readline):
+ for token in tokenize.generate_tokens(StringIO(s).readline):
token_type = token[0]
token_string = token[1]
if (last_token_was_number and
@@ -549,8 +552,7 @@ def _filter_header(s):
else:
tokens.append(token)
last_token_was_number = (token_type == tokenize.NUMBER)
- # removing newline (see above) as python 2.7.5 workaround
- return tokenize.untokenize(tokens)[:-1]
+ return tokenize.untokenize(tokens)
def _read_array_header(fp, version):
@@ -592,7 +594,7 @@ def _read_array_header(fp, version):
# Sanity-check the values.
if (not isinstance(d['shape'], tuple) or
- not numpy.all([isinstance(x, (int, long)) for x in d['shape']])):
+ not numpy.all([isinstance(x, int) for x in d['shape']])):
msg = "shape is not valid: {!r}"
raise ValueError(msg.format(d['shape']))
if not isinstance(d['fortran_order'], bool):
@@ -600,7 +602,7 @@ def _read_array_header(fp, version):
raise ValueError(msg.format(d['fortran_order']))
try:
dtype = descr_to_dtype(d['descr'])
- except TypeError as e:
+ except TypeError:
msg = "descr is not a valid dtype descriptor: {!r}"
raise ValueError(msg.format(d['descr']))
@@ -729,12 +731,10 @@ def read_array(fp, allow_pickle=False, pickle_kwargs=None):
try:
array = pickle.load(fp, **pickle_kwargs)
except UnicodeError as err:
- if sys.version_info[0] >= 3:
- # Friendlier error message
- raise UnicodeError("Unpickling a python object failed: %r\n"
- "You may need to pass the encoding= option "
- "to numpy.load" % (err,))
- raise
+ # Friendlier error message
+ raise UnicodeError("Unpickling a python object failed: %r\n"
+ "You may need to pass the encoding= option "
+ "to numpy.load" % (err,))
else:
if isfileobj(fp):
# We can use the fast fromfile() function.
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index c39c2eea1..bfcf0d316 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -1,11 +1,4 @@
-from __future__ import division, absolute_import, print_function
-
-try:
- # Accessing collections abstract classes from collections
- # has been deprecated since Python 3.3
- import collections.abc as collections_abc
-except ImportError:
- import collections as collections_abc
+import collections.abc
import functools
import re
import sys
@@ -13,10 +6,10 @@ import warnings
import numpy as np
import numpy.core.numeric as _nx
-from numpy.core import atleast_1d, transpose
+from numpy.core import transpose
from numpy.core.numeric import (
ones, zeros, arange, concatenate, array, asarray, asanyarray, empty,
- empty_like, ndarray, around, floor, ceil, take, dot, where, intp,
+ ndarray, around, floor, ceil, take, dot, where, intp,
integer, isscalar, absolute
)
from numpy.core.umath import (
@@ -36,23 +29,17 @@ from numpy.core.multiarray import (
interp as compiled_interp, interp_complex as compiled_interp_complex
)
from numpy.core.umath import _add_newdoc_ufunc as add_newdoc_ufunc
-from numpy.compat import long
-if sys.version_info[0] < 3:
- # Force range to be a generator, for np.delete's usage.
- range = xrange
- import __builtin__ as builtins
-else:
- import builtins
+import builtins
+
+# needed in this module for compatibility
+from numpy.lib.histograms import histogram, histogramdd
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
-# needed in this module for compatibility
-from numpy.lib.histograms import histogram, histogramdd
-
__all__ = [
'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile',
'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp', 'flip',
@@ -70,7 +57,7 @@ def _rot90_dispatcher(m, k=None, axes=None):
@array_function_dispatch(_rot90_dispatcher)
-def rot90(m, k=1, axes=(0,1)):
+def rot90(m, k=1, axes=(0, 1)):
"""
Rotate an array by 90 degrees in the plane specified by axes.
@@ -150,7 +137,7 @@ def rot90(m, k=1, axes=(0,1)):
axes_list[axes[0]])
if k == 1:
- return transpose(flip(m,axes[1]), axes_list)
+ return transpose(flip(m, axes[1]), axes_list)
else:
# k == 3
return flip(transpose(m, axes_list), axes[1])
@@ -504,8 +491,7 @@ def _piecewise_dispatcher(x, condlist, funclist, *args, **kw):
yield x
# support the undocumented behavior of allowing scalars
if np.iterable(condlist):
- for c in condlist:
- yield c
+ yield from condlist
@array_function_dispatch(_piecewise_dispatcher)
@@ -620,7 +606,7 @@ def piecewise(x, condlist, funclist, *args, **kw):
y = zeros(x.shape, x.dtype)
for k in range(n):
item = funclist[k]
- if not isinstance(item, collections_abc.Callable):
+ if not isinstance(item, collections.abc.Callable):
y[condlist[k]] = item
else:
vals = x[condlist[k]]
@@ -631,10 +617,8 @@ def piecewise(x, condlist, funclist, *args, **kw):
def _select_dispatcher(condlist, choicelist, default=None):
- for c in condlist:
- yield c
- for c in choicelist:
- yield c
+ yield from condlist
+ yield from choicelist
@array_function_dispatch(_select_dispatcher)
@@ -723,12 +707,12 @@ def select(condlist, choicelist, default=0):
return result
-def _copy_dispatcher(a, order=None):
+def _copy_dispatcher(a, order=None, subok=None):
return (a,)
@array_function_dispatch(_copy_dispatcher)
-def copy(a, order='K'):
+def copy(a, order='K', subok=False):
"""
Return an array copy of the given object.
@@ -743,12 +727,21 @@ def copy(a, order='K'):
as possible. (Note that this function and :meth:`ndarray.copy` are very
similar, but have different default values for their order=
arguments.)
+ subok : bool, optional
+ If True, then sub-classes will be passed-through, otherwise the
+ returned array will be forced to be a base-class array (defaults to False).
+
+ .. versionadded:: 1.19.0
Returns
-------
arr : ndarray
Array interpretation of `a`.
+ See Also
+ --------
+ ndarray.copy : Preferred method for creating an array copy
+
Notes
-----
This is equivalent to:
@@ -772,19 +765,18 @@ def copy(a, order='K'):
False
"""
- return array(a, order=order, copy=True)
+ return array(a, order=order, subok=subok, copy=True)
# Basic operations
-def _gradient_dispatcher(f, *varargs, **kwargs):
+def _gradient_dispatcher(f, *varargs, axis=None, edge_order=None):
yield f
- for v in varargs:
- yield v
+ yield from varargs
@array_function_dispatch(_gradient_dispatcher)
-def gradient(f, *varargs, **kwargs):
+def gradient(f, *varargs, axis=None, edge_order=1):
"""
Return the gradient of an N-dimensional array.
@@ -961,11 +953,10 @@ def gradient(f, *varargs, **kwargs):
f = np.asanyarray(f)
N = f.ndim # number of dimensions
- axes = kwargs.pop('axis', None)
- if axes is None:
+ if axis is None:
axes = tuple(range(N))
else:
- axes = _nx.normalize_axis_tuple(axes, N)
+ axes = _nx.normalize_axis_tuple(axis, N)
len_axes = len(axes)
n = len(varargs)
@@ -979,13 +970,18 @@ def gradient(f, *varargs, **kwargs):
# scalar or 1d array for each axis
dx = list(varargs)
for i, distances in enumerate(dx):
- if np.ndim(distances) == 0:
+ distances = np.asanyarray(distances)
+ if distances.ndim == 0:
continue
- elif np.ndim(distances) != 1:
+ elif distances.ndim != 1:
raise ValueError("distances must be either scalars or 1d")
if len(distances) != f.shape[axes[i]]:
raise ValueError("when 1d, distances must match "
"the length of the corresponding dimension")
+ if np.issubdtype(distances.dtype, np.integer):
+ # Convert numpy integer types to float64 to avoid modular
+ # arithmetic in np.diff(distances).
+ distances = distances.astype(np.float64)
diffx = np.diff(distances)
# if distances are constant reduce to the scalar case
# since it brings a consistent speedup
@@ -995,10 +991,6 @@ def gradient(f, *varargs, **kwargs):
else:
raise TypeError("invalid number of arguments")
- edge_order = kwargs.pop('edge_order', 1)
- if kwargs:
- raise TypeError('"{}" are not valid keyword arguments.'.format(
- '", "'.join(kwargs.keys())))
if edge_order > 2:
raise ValueError("'edge_order' greater than 2 not supported")
@@ -1024,8 +1016,12 @@ def gradient(f, *varargs, **kwargs):
elif np.issubdtype(otype, np.inexact):
pass
else:
- # all other types convert to floating point
- otype = np.double
+ # All other types convert to floating point.
+ # First check if f is a numpy integer type; if so, convert f to float64
+ # to avoid modular arithmetic when computing the changes in f.
+ if np.issubdtype(otype, np.integer):
+ f = f.astype(np.float64)
+ otype = np.float64
for axis, ax_dx in zip(axes, dx):
if f.shape[axis] < edge_order + 1:
@@ -1612,6 +1608,7 @@ def trim_zeros(filt, trim='fb'):
last = last - 1
return filt[first:last]
+
def _extract_dispatcher(condition, arr):
return (condition, arr)
@@ -1867,7 +1864,7 @@ def _create_arrays(broadcast_shape, dim_sizes, list_of_core_dims, dtypes):
@set_module('numpy')
-class vectorize(object):
+class vectorize:
"""
vectorize(pyfunc, otypes=None, doc=None, excluded=None, cache=False,
signature=None)
@@ -1893,7 +1890,7 @@ class vectorize(object):
typecode characters or a list of data type specifiers. There should
be one data type specifier for each output.
doc : str, optional
- The docstring for the function. If `None`, the docstring will be the
+ The docstring for the function. If None, the docstring will be the
``pyfunc.__doc__``.
excluded : set, optional
Set of strings or integers representing the positional or keyword
@@ -2305,7 +2302,7 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
>>> m = np.arange(10, dtype=np.float64)
>>> f = np.arange(10) * 2
>>> a = np.arange(10) ** 2.
- >>> ddof = 9 # N - 1
+ >>> ddof = 1
>>> w = f * a
>>> v1 = np.sum(w)
>>> v2 = np.sum(w * a)
@@ -2947,6 +2944,7 @@ def hamming(M):
n = arange(0, M)
return 0.54 - 0.46*cos(2.0*pi*n/(M-1))
+
## Code from cephes for i0
_i0A = [
@@ -3489,6 +3487,7 @@ def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
else:
return r
+
def _median(a, axis=None, out=None, overwrite_input=False):
# can't be reasonably be implemented in terms of percentile as we have to
# call mean to not break astropy
@@ -3707,7 +3706,7 @@ def quantile(a, q, axis=None, out=None,
overwrite_input=False, interpolation='linear', keepdims=False):
"""
Compute the q-th quantile of the data along the specified axis.
-
+
.. versionadded:: 1.15.0
Parameters
@@ -3878,8 +3877,8 @@ def _quantile_ureduce_func(a, q, axis=None, out=None, overwrite_input=False,
"interpolation can only be 'linear', 'lower' 'higher', "
"'midpoint', or 'nearest'")
- n = np.array(False, dtype=bool) # check for nan's flag
- if indices.dtype == intp: # take the points along axis
+ n = np.array(False, dtype=bool) # check for nan's flag
+ if np.issubdtype(indices.dtype, np.integer): # take the points along axis
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices = concatenate((indices, [-1]))
@@ -3898,7 +3897,6 @@ def _quantile_ureduce_func(a, q, axis=None, out=None, overwrite_input=False,
indices = indices[0]
r = take(ap, indices, axis=axis, out=out)
-
else: # weight the points above and below the indices
indices_below = floor(indices).astype(intp)
indices_above = indices_below + 1
@@ -4059,13 +4057,13 @@ def trapz(y, x=None, dx=1.0, axis=-1):
return ret
-def _meshgrid_dispatcher(*xi, **kwargs):
+def _meshgrid_dispatcher(*xi, copy=None, sparse=None, indexing=None):
return xi
# Based on scitools meshgrid
@array_function_dispatch(_meshgrid_dispatcher)
-def meshgrid(*xi, **kwargs):
+def meshgrid(*xi, copy=True, sparse=False, indexing='xy'):
"""
Return coordinate matrices from coordinate vectors.
@@ -4171,14 +4169,6 @@ def meshgrid(*xi, **kwargs):
"""
ndim = len(xi)
- copy_ = kwargs.pop('copy', True)
- sparse = kwargs.pop('sparse', False)
- indexing = kwargs.pop('indexing', 'xy')
-
- if kwargs:
- raise TypeError("meshgrid() got an unexpected keyword argument '%s'"
- % (list(kwargs)[0],))
-
if indexing not in ['xy', 'ij']:
raise ValueError(
"Valid values for `indexing` are 'xy' and 'ij'.")
@@ -4196,7 +4186,7 @@ def meshgrid(*xi, **kwargs):
# Return the full N-D matrix (not only the 1-D vector)
output = np.broadcast_arrays(*output, subok=True)
- if copy_:
+ if copy:
output = [x.copy() for x in output]
return output
@@ -4216,12 +4206,17 @@ def delete(arr, obj, axis=None):
Parameters
----------
arr : array_like
- Input array.
+ Input array.
obj : slice, int or array of ints
- Indicate indices of sub-arrays to remove along the specified axis.
+ Indicate indices of sub-arrays to remove along the specified axis.
+
+ .. versionchanged:: 1.19.0
+ Boolean indices are now treated as a mask of elements to remove,
+ rather than being cast to the integers 0 and 1.
+
axis : int, optional
- The axis along which to delete the subarray defined by `obj`.
- If `axis` is None, `obj` is applied to the flattened array.
+ The axis along which to delete the subarray defined by `obj`.
+ If `axis` is None, `obj` is applied to the flattened array.
Returns
-------
@@ -4279,20 +4274,11 @@ def delete(arr, obj, axis=None):
if axis is None:
if ndim != 1:
arr = arr.ravel()
+ # needed for np.matrix, which is still not 1d after being ravelled
ndim = arr.ndim
- axis = -1
-
- if ndim == 0:
- # 2013-09-24, 1.9
- warnings.warn(
- "in the future the special handling of scalars will be removed "
- "from delete and raise an error", DeprecationWarning, stacklevel=3)
- if wrap:
- return wrap(arr)
- else:
- return arr.copy(order=arrorder)
-
- axis = normalize_axis_index(axis, ndim)
+ axis = ndim - 1
+ else:
+ axis = normalize_axis_index(axis, ndim)
slobj = [slice(None)]*ndim
N = arr.shape[axis]
@@ -4348,18 +4334,8 @@ def delete(arr, obj, axis=None):
else:
return new
- _obj = obj
- obj = np.asarray(obj)
- # After removing the special handling of booleans and out of
- # bounds values, the conversion to the array can be removed.
- if obj.dtype == bool:
- warnings.warn("in the future insert will treat boolean arrays and "
- "array-likes as boolean index instead of casting it "
- "to integer", FutureWarning, stacklevel=3)
- obj = obj.astype(intp)
- if isinstance(_obj, (int, long, integer)):
+ if isinstance(obj, (int, integer)) and not isinstance(obj, bool):
# optimization for a single value
- obj = obj.item()
if (obj < -N or obj >= N):
raise IndexError(
"index %i is out of bounds for axis %i with "
@@ -4375,35 +4351,23 @@ def delete(arr, obj, axis=None):
slobj2[axis] = slice(obj+1, None)
new[tuple(slobj)] = arr[tuple(slobj2)]
else:
+ _obj = obj
+ obj = np.asarray(obj)
if obj.size == 0 and not isinstance(_obj, np.ndarray):
obj = obj.astype(intp)
- if not np.can_cast(obj, intp, 'same_kind'):
- # obj.size = 1 special case always failed and would just
- # give superfluous warnings.
- # 2013-09-24, 1.9
- warnings.warn(
- "using a non-integer array as obj in delete will result in an "
- "error in the future", DeprecationWarning, stacklevel=3)
- obj = obj.astype(intp)
- keep = ones(N, dtype=bool)
- # Test if there are out of bound indices, this is deprecated
- inside_bounds = (obj < N) & (obj >= -N)
- if not inside_bounds.all():
- # 2013-09-24, 1.9
- warnings.warn(
- "in the future out of bounds indices will raise an error "
- "instead of being ignored by `numpy.delete`.",
- DeprecationWarning, stacklevel=3)
- obj = obj[inside_bounds]
- positive_indices = obj >= 0
- if not positive_indices.all():
- warnings.warn(
- "in the future negative indices will not be ignored by "
- "`numpy.delete`.", FutureWarning, stacklevel=3)
- obj = obj[positive_indices]
+ if obj.dtype == bool:
+ if obj.shape != (N,):
+ raise ValueError('boolean array argument obj to delete '
+ 'must be one dimensional and match the axis '
+ 'length of {}'.format(N))
+
+ # optimization, the other branch is slower
+ keep = ~obj
+ else:
+ keep = ones(N, dtype=bool)
+ keep[obj,] = False
- keep[obj, ] = False
slobj[axis] = keep
new = arr[tuple(slobj)]
@@ -4519,19 +4483,9 @@ def insert(arr, obj, values, axis=None):
if axis is None:
if ndim != 1:
arr = arr.ravel()
+ # needed for np.matrix, which is still not 1d after being ravelled
ndim = arr.ndim
axis = ndim - 1
- elif ndim == 0:
- # 2013-09-24, 1.9
- warnings.warn(
- "in the future the special handling of scalars will be removed "
- "from insert and raise an error", DeprecationWarning, stacklevel=3)
- arr = arr.copy(order=arrorder)
- arr[...] = values
- if wrap:
- return wrap(arr)
- else:
- return arr
else:
axis = normalize_axis_index(axis, ndim)
slobj = [slice(None)]*ndim
@@ -4540,12 +4494,13 @@ def insert(arr, obj, values, axis=None):
if isinstance(obj, slice):
# turn it into a range object
- indices = arange(*obj.indices(N), **{'dtype': intp})
+ indices = arange(*obj.indices(N), dtype=intp)
else:
# need to copy obj, because indices will be changed in-place
indices = np.array(obj)
if indices.dtype == bool:
# See also delete
+ # 2012-10-11, NumPy 1.8
warnings.warn(
"in the future insert will treat boolean arrays and "
"array-likes as a boolean index instead of casting it to "
@@ -4595,13 +4550,6 @@ def insert(arr, obj, values, axis=None):
# Can safely cast the empty list to intp
indices = indices.astype(intp)
- if not np.can_cast(indices, intp, 'same_kind'):
- # 2013-09-24, 1.9
- warnings.warn(
- "using a non-integer array as obj in insert will result in an "
- "error in the future", DeprecationWarning, stacklevel=3)
- indices = indices.astype(intp)
-
indices[indices < 0] += N
numnew = len(indices)
@@ -4672,7 +4620,9 @@ def append(arr, values, axis=None):
>>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0)
Traceback (most recent call last):
...
- ValueError: all the input arrays must have same number of dimensions
+ ValueError: all the input arrays must have same number of dimensions, but
+ the array at index 0 has 2 dimension(s) and the array at index 1 has 1
+ dimension(s)
"""
arr = asanyarray(arr)
diff --git a/numpy/lib/histograms.py b/numpy/lib/histograms.py
index 8474bd5d3..ede8a26e4 100644
--- a/numpy/lib/histograms.py
+++ b/numpy/lib/histograms.py
@@ -1,15 +1,12 @@
"""
Histogram-related functions
"""
-from __future__ import division, absolute_import, print_function
-
import contextlib
import functools
import operator
import warnings
import numpy as np
-from numpy.compat.py3k import basestring
from numpy.core import overrides
__all__ = ['histogram', 'histogramdd', 'histogram_bin_edges']
@@ -22,6 +19,16 @@ array_function_dispatch = functools.partial(
_range = range
+def _ptp(x):
+ """Peak-to-peak value of x.
+
+ This implementation avoids the problem of signed integer arrays having a
+ peak-to-peak value that cannot be represented with the array's data type.
+ This function returns an unsigned value for signed integer arrays.
+ """
+ return _unsigned_subtract(x.max(), x.min())
+
+
def _hist_bin_sqrt(x, range):
"""
Square root histogram bin estimator.
@@ -40,7 +47,7 @@ def _hist_bin_sqrt(x, range):
h : An estimate of the optimal bin width for the given data.
"""
del range # unused
- return x.ptp() / np.sqrt(x.size)
+ return _ptp(x) / np.sqrt(x.size)
def _hist_bin_sturges(x, range):
@@ -63,7 +70,7 @@ def _hist_bin_sturges(x, range):
h : An estimate of the optimal bin width for the given data.
"""
del range # unused
- return x.ptp() / (np.log2(x.size) + 1.0)
+ return _ptp(x) / (np.log2(x.size) + 1.0)
def _hist_bin_rice(x, range):
@@ -87,7 +94,7 @@ def _hist_bin_rice(x, range):
h : An estimate of the optimal bin width for the given data.
"""
del range # unused
- return x.ptp() / (2.0 * x.size ** (1.0 / 3))
+ return _ptp(x) / (2.0 * x.size ** (1.0 / 3))
def _hist_bin_scott(x, range):
@@ -137,7 +144,7 @@ def _hist_bin_stone(x, range):
"""
n = x.size
- ptp_x = np.ptp(x)
+ ptp_x = _ptp(x)
if n <= 1 or ptp_x == 0:
return 0
@@ -184,7 +191,7 @@ def _hist_bin_doane(x, range):
np.true_divide(temp, sigma, temp)
np.power(temp, 3, temp)
g1 = np.mean(temp)
- return x.ptp() / (1.0 + np.log2(x.size) +
+ return _ptp(x) / (1.0 + np.log2(x.size) +
np.log2(1.0 + np.absolute(g1) / sg1))
return 0.0
@@ -200,7 +207,7 @@ def _hist_bin_fd(x, range):
than the standard deviation, so it is less accurate, especially for
long tailed distributions.
- If the IQR is 0, this function returns 1 for the number of bins.
+ If the IQR is 0, this function returns 0 for the bin width.
Binwidth is inversely proportional to the cube root of data size
(asymptotically optimal).
@@ -222,21 +229,21 @@ def _hist_bin_fd(x, range):
def _hist_bin_auto(x, range):
"""
Histogram bin estimator that uses the minimum width of the
- Freedman-Diaconis and Sturges estimators if the FD bandwidth is non zero
- and the Sturges estimator if the FD bandwidth is 0.
+ Freedman-Diaconis and Sturges estimators if the FD bin width is non-zero.
+ If the bin width from the FD estimator is 0, the Sturges estimator is used.
The FD estimator is usually the most robust method, but its width
estimate tends to be too large for small `x` and bad for data with limited
variance. The Sturges estimator is quite good for small (<1000) datasets
- and is the default in the R language. This method gives good off the shelf
+ and is the default in the R language. This method gives good off-the-shelf
behaviour.
.. versionchanged:: 1.15.0
If there is limited variance the IQR can be 0, which results in the
FD bin width being 0 too. This is not a valid bin width, so
``np.histogram_bin_edges`` chooses 1 bin instead, which may not be optimal.
- If the IQR is 0, it's unlikely any variance based estimators will be of
- use, so we revert to the sturges estimator, which only uses the size of the
+ If the IQR is 0, it's unlikely any variance-based estimators will be of
+ use, so we revert to the Sturges estimator, which only uses the size of the
dataset in its calculation.
Parameters
@@ -375,7 +382,7 @@ def _get_bin_edges(a, bins, range, weights):
n_equal_bins = None
bin_edges = None
- if isinstance(bins, basestring):
+ if isinstance(bins, str):
bin_name = bins
# if `bins` is a string for an automatic method,
# this will replace it with the number of bins calculated
@@ -946,9 +953,9 @@ def histogramdd(sample, bins=10, range=None, normed=None, weights=None,
Note the unusual interpretation of sample when an array_like:
* When an array, each row is a coordinate in a D-dimensional space -
- such as ``histogramgramdd(np.array([p1, p2, p3]))``.
+ such as ``histogramdd(np.array([p1, p2, p3]))``.
* When an array_like, each element is the list of values for single
- coordinate - such as ``histogramgramdd((X, Y, Z))``.
+ coordinate - such as ``histogramdd((X, Y, Z))``.
The first form should be preferred.
diff --git a/numpy/lib/index_tricks.py b/numpy/lib/index_tricks.py
index 04384854c..b4118814d 100644
--- a/numpy/lib/index_tricks.py
+++ b/numpy/lib/index_tricks.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import functools
import sys
import math
@@ -107,7 +105,7 @@ def ix_(*args):
out.append(new)
return tuple(out)
-class nd_grid(object):
+class nd_grid:
"""
Construct a multi-dimensional "meshgrid".
@@ -299,7 +297,7 @@ class OGridClass(nd_grid):
ogrid = OGridClass()
-class AxisConcatenator(object):
+class AxisConcatenator:
"""
Translates slice objects to concatenation along an axis.
@@ -552,7 +550,7 @@ c_ = CClass()
@set_module('numpy')
-class ndenumerate(object):
+class ndenumerate:
"""
Multidimensional index iterator.
@@ -599,11 +597,9 @@ class ndenumerate(object):
def __iter__(self):
return self
- next = __next__
-
@set_module('numpy')
-class ndindex(object):
+class ndindex:
"""
An N-dimensional iterator object to index arrays.
@@ -667,8 +663,6 @@ class ndindex(object):
next(self._it)
return self._it.multi_index
- next = __next__
-
# You can do all this with slice() plus a few special objects,
# but there's a lot to remember. This version is simpler because
@@ -681,7 +675,7 @@ class ndindex(object):
#
#
-class IndexExpression(object):
+class IndexExpression:
"""
A nicer way to build up index tuples for arrays.
diff --git a/numpy/lib/mixins.py b/numpy/lib/mixins.py
index f974a7724..50157069c 100644
--- a/numpy/lib/mixins.py
+++ b/numpy/lib/mixins.py
@@ -1,8 +1,4 @@
"""Mixin classes for custom array types that don't inherit from ndarray."""
-from __future__ import division, absolute_import, print_function
-
-import sys
-
from numpy.core import umath as um
@@ -60,7 +56,7 @@ def _unary_method(ufunc, name):
return func
-class NDArrayOperatorsMixin(object):
+class NDArrayOperatorsMixin:
"""Mixin defining all operator special methods using __array_ufunc__.
This class implements the special methods for almost all of Python's
@@ -154,9 +150,7 @@ class NDArrayOperatorsMixin(object):
__mul__, __rmul__, __imul__ = _numeric_methods(um.multiply, 'mul')
__matmul__, __rmatmul__, __imatmul__ = _numeric_methods(
um.matmul, 'matmul')
- if sys.version_info.major < 3:
- # Python 3 uses only __truediv__ and __floordiv__
- __div__, __rdiv__, __idiv__ = _numeric_methods(um.divide, 'div')
+ # Python 3 does not use __div__, __rdiv__, or __idiv__
__truediv__, __rtruediv__, __itruediv__ = _numeric_methods(
um.true_divide, 'truediv')
__floordiv__, __rfloordiv__, __ifloordiv__ = _numeric_methods(
diff --git a/numpy/lib/nanfunctions.py b/numpy/lib/nanfunctions.py
index 6cffab6ac..003550432 100644
--- a/numpy/lib/nanfunctions.py
+++ b/numpy/lib/nanfunctions.py
@@ -20,8 +20,6 @@ Functions
- `nanpercentile` -- qth percentile of non-NaN values
"""
-from __future__ import division, absolute_import, print_function
-
import functools
import warnings
import numpy as np
@@ -95,17 +93,18 @@ def _replace_nan(a, val):
NaNs, otherwise return None.
"""
- a = np.array(a, subok=True, copy=True)
+ a = np.asanyarray(a)
if a.dtype == np.object_:
# object arrays do not support `isnan` (gh-9009), so make a guess
- mask = a != a
+ mask = np.not_equal(a, a, dtype=bool)
elif issubclass(a.dtype.type, np.inexact):
mask = np.isnan(a)
else:
mask = None
if mask is not None:
+ a = np.array(a, subok=True, copy=True)
np.copyto(a, val, where=mask)
return a, mask
@@ -244,8 +243,8 @@ def nanmin(a, axis=None, out=None, keepdims=np._NoValue):
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the
- expected output, but the type will be cast if necessary. See
- `doc.ufuncs` for details.
+ expected output, but the type will be cast if necessary. See
+ `ufuncs-output-type` for more details.
.. versionadded:: 1.8.0
keepdims : bool, optional
@@ -359,8 +358,8 @@ def nanmax(a, axis=None, out=None, keepdims=np._NoValue):
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the
- expected output, but the type will be cast if necessary. See
- `doc.ufuncs` for details.
+ expected output, but the type will be cast if necessary. See
+ `ufuncs-output-type` for more details.
.. versionadded:: 1.8.0
keepdims : bool, optional
@@ -585,8 +584,8 @@ def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
Alternate output array in which to place the result. The default
is ``None``. If provided, it must have the same shape as the
expected output, but the type will be cast if necessary. See
- `doc.ufuncs` for details. The casting of NaN to integer can yield
- unexpected results.
+ `ufuncs-output-type` for more details. The casting of NaN to integer
+ can yield unexpected results.
.. versionadded:: 1.8.0
keepdims : bool, optional
@@ -681,9 +680,9 @@ def nanprod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``. If provided, it must have the same shape as the
- expected output, but the type will be cast if necessary. See
- `doc.ufuncs` for details. The casting of NaN to integer can yield
- unexpected results.
+ expected output, but the type will be cast if necessary. See
+ `ufuncs-output-type` for more details. The casting of NaN to integer
+ can yield unexpected results.
keepdims : bool, optional
If True, the axes which are reduced are left in the result as
dimensions with size one. With this option, the result will
@@ -750,8 +749,8 @@ def nancumsum(a, axis=None, dtype=None, out=None):
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
- but the type will be cast if necessary. See `doc.ufuncs`
- (Section "Output arguments") for more details.
+ but the type will be cast if necessary. See `ufuncs-output-type` for
+ more details.
Returns
-------
@@ -888,8 +887,8 @@ def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the
- expected output, but the type will be cast if necessary. See
- `doc.ufuncs` for details.
+ expected output, but the type will be cast if necessary. See
+ `ufuncs-output-type` for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
@@ -1473,7 +1472,7 @@ def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
mean : Average
var : Variance while not ignoring NaNs
nanstd, nanmean
- numpy.doc.ufuncs : Section "Output arguments"
+ ufuncs-output-type
Notes
-----
@@ -1625,7 +1624,7 @@ def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
--------
var, mean, std
nanvar, nanmean
- numpy.doc.ufuncs : Section "Output arguments"
+ ufuncs-output-type
Notes
-----
diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py
index e57a6dd47..0db2e6897 100644
--- a/numpy/lib/npyio.py
+++ b/numpy/lib/npyio.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import sys
import os
import re
@@ -9,6 +7,7 @@ import warnings
import weakref
import contextlib
from operator import itemgetter, index as opindex
+from collections.abc import Mapping
import numpy as np
from . import format
@@ -24,16 +23,10 @@ from ._iotools import (
)
from numpy.compat import (
- asbytes, asstr, asunicode, bytes, basestring, os_fspath, os_PathLike,
+ asbytes, asstr, asunicode, bytes, os_fspath, os_PathLike,
pickle, contextlib_nullcontext
)
-if sys.version_info[0] >= 3:
- from collections.abc import Mapping
-else:
- from future_builtins import map
- from collections import Mapping
-
@set_module('numpy')
def loads(*args, **kwargs):
@@ -55,7 +48,7 @@ array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
-class BagObj(object):
+class BagObj:
"""
BagObj(obj)
@@ -69,7 +62,7 @@ class BagObj(object):
Examples
--------
>>> from numpy.lib.npyio import BagObj as BO
- >>> class BagDemo(object):
+ >>> class BagDemo:
... def __getitem__(self, key): # An instance of BagObj(BagDemo)
... # will call this method when any
... # attribute look-up is required
@@ -266,26 +259,25 @@ class NpzFile(Mapping):
raise KeyError("%s is not a file in the archive" % key)
- if sys.version_info.major == 3:
- # deprecate the python 2 dict apis that we supported by accident in
- # python 3. We forgot to implement itervalues() at all in earlier
- # versions of numpy, so no need to deprecated it here.
+ # deprecate the python 2 dict apis that we supported by accident in
+ # python 3. We forgot to implement itervalues() at all in earlier
+    # versions of numpy, so no need to deprecate it here.
- def iteritems(self):
- # Numpy 1.15, 2018-02-20
- warnings.warn(
- "NpzFile.iteritems is deprecated in python 3, to match the "
- "removal of dict.itertems. Use .items() instead.",
- DeprecationWarning, stacklevel=2)
- return self.items()
+ def iteritems(self):
+ # Numpy 1.15, 2018-02-20
+ warnings.warn(
+ "NpzFile.iteritems is deprecated in python 3, to match the "
+ "removal of dict.itertems. Use .items() instead.",
+ DeprecationWarning, stacklevel=2)
+ return self.items()
- def iterkeys(self):
- # Numpy 1.15, 2018-02-20
- warnings.warn(
- "NpzFile.iterkeys is deprecated in python 3, to match the "
- "removal of dict.iterkeys. Use .keys() instead.",
- DeprecationWarning, stacklevel=2)
- return self.keys()
+ def iterkeys(self):
+ # Numpy 1.15, 2018-02-20
+ warnings.warn(
+ "NpzFile.iterkeys is deprecated in python 3, to match the "
+ "removal of dict.iterkeys. Use .keys() instead.",
+ DeprecationWarning, stacklevel=2)
+ return self.keys()
@set_module('numpy')
@@ -414,21 +406,16 @@ def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True,
# result can similarly silently corrupt numerical data.
raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'")
- if sys.version_info[0] >= 3:
- pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports)
- else:
- # Nothing to do on Python 2
- pickle_kwargs = {}
+ pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports)
- # TODO: Use contextlib.ExitStack once we drop Python 2
- if hasattr(file, 'read'):
- fid = file
- own_fid = False
- else:
- fid = open(os_fspath(file), "rb")
- own_fid = True
+ with contextlib.ExitStack() as stack:
+ if hasattr(file, 'read'):
+ fid = file
+ own_fid = False
+ else:
+ fid = stack.enter_context(open(os_fspath(file), "rb"))
+ own_fid = True
- try:
# Code to distinguish from NumPy binary files and pickles.
_ZIP_PREFIX = b'PK\x03\x04'
_ZIP_SUFFIX = b'PK\x05\x06' # empty zip files start with this
@@ -439,10 +426,10 @@ def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True,
fid.seek(-min(N, len(magic)), 1) # back-up
if magic.startswith(_ZIP_PREFIX) or magic.startswith(_ZIP_SUFFIX):
# zip-file (assume .npz)
- # Transfer file ownership to NpzFile
+ # Potentially transfer file ownership to NpzFile
+ stack.pop_all()
ret = NpzFile(fid, own_fid=own_fid, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
- own_fid = False
return ret
elif magic == format.MAGIC_PREFIX:
# .npy file
@@ -461,9 +448,6 @@ def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True,
except Exception:
raise IOError(
"Failed to interpret file %s as a pickle" % repr(file))
- finally:
- if own_fid:
- fid.close()
def _save_dispatcher(file, arr, allow_pickle=None, fix_imports=None):
@@ -480,7 +464,7 @@ def save(file, arr, allow_pickle=True, fix_imports=True):
file : file, str, or pathlib.Path
File or filename to which the data is saved. If file is a file-object,
then the filename is unchanged. If file is a string or Path, a ``.npy``
- extension will be appended to the file name if it does not already
+ extension will be appended to the filename if it does not already
have one.
arr : array_like
Array data to be saved.
@@ -506,9 +490,9 @@ def save(file, arr, allow_pickle=True, fix_imports=True):
Notes
-----
For a description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.
-
- Any data saved to the file is appended to the end of the file.
-
+
+ Any data saved to the file is appended to the end of the file.
+
Examples
--------
>>> from tempfile import TemporaryFile
@@ -524,49 +508,35 @@ def save(file, arr, allow_pickle=True, fix_imports=True):
>>> with open('test.npy', 'wb') as f:
... np.save(f, np.array([1, 2]))
- ... np.save(f, np.array([1, 3]))
+ ... np.save(f, np.array([1, 3]))
>>> with open('test.npy', 'rb') as f:
... a = np.load(f)
... b = np.load(f)
>>> print(a, b)
# [1 2] [1 3]
"""
- own_fid = False
if hasattr(file, 'write'):
- fid = file
+ file_ctx = contextlib_nullcontext(file)
else:
file = os_fspath(file)
if not file.endswith('.npy'):
file = file + '.npy'
- fid = open(file, "wb")
- own_fid = True
+ file_ctx = open(file, "wb")
- if sys.version_info[0] >= 3:
- pickle_kwargs = dict(fix_imports=fix_imports)
- else:
- # Nothing to do on Python 2
- pickle_kwargs = None
-
- try:
+ with file_ctx as fid:
arr = np.asanyarray(arr)
format.write_array(fid, arr, allow_pickle=allow_pickle,
- pickle_kwargs=pickle_kwargs)
- finally:
- if own_fid:
- fid.close()
+ pickle_kwargs=dict(fix_imports=fix_imports))
def _savez_dispatcher(file, *args, **kwds):
- for a in args:
- yield a
- for v in kwds.values():
- yield v
+ yield from args
+ yield from kwds.values()
@array_function_dispatch(_savez_dispatcher)
def savez(file, *args, **kwds):
- """
- Save several arrays into a single file in uncompressed ``.npz`` format.
+ """Save several arrays into a single file in uncompressed ``.npz`` format.
If arguments are passed in with no keywords, the corresponding variable
names, in the ``.npz`` file, are 'arr_0', 'arr_1', etc. If keyword
@@ -576,9 +546,9 @@ def savez(file, *args, **kwds):
Parameters
----------
file : str or file
- Either the file name (string) or an open file (file-like object)
+ Either the filename (string) or an open file (file-like object)
where the data will be saved. If file is a string or a Path, the
- ``.npz`` extension will be appended to the file name if it is not
+ ``.npz`` extension will be appended to the filename if it is not
already there.
args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
@@ -611,6 +581,10 @@ def savez(file, *args, **kwds):
its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
+ When saving dictionaries, the dictionary keys become filenames
+ inside the ZIP archive. Therefore, keys should be valid filenames.
+ E.g., avoid keys that begin with ``/`` or contain ``.``.
+
Examples
--------
>>> from tempfile import TemporaryFile
@@ -638,16 +612,13 @@ def savez(file, *args, **kwds):
['x', 'y']
>>> npzfile['x']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
-
"""
_savez(file, args, kwds, False)
def _savez_compressed_dispatcher(file, *args, **kwds):
- for a in args:
- yield a
- for v in kwds.values():
- yield v
+ yield from args
+ yield from kwds.values()
@array_function_dispatch(_savez_compressed_dispatcher)
@@ -656,15 +627,15 @@ def savez_compressed(file, *args, **kwds):
Save several arrays into a single file in compressed ``.npz`` format.
If keyword arguments are given, then filenames are taken from the keywords.
- If arguments are passed in with no keywords, then stored file names are
+ If arguments are passed in with no keywords, then stored filenames are
arr_0, arr_1, etc.
Parameters
----------
file : str or file
- Either the file name (string) or an open file (file-like object)
+ Either the filename (string) or an open file (file-like object)
where the data will be saved. If file is a string or a Path, the
- ``.npz`` extension will be appended to the file name if it is not
+ ``.npz`` extension will be appended to the filename if it is not
already there.
args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
@@ -691,7 +662,7 @@ def savez_compressed(file, *args, **kwds):
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is compressed with
``zipfile.ZIP_DEFLATED`` and each file in the archive contains one variable
- in ``.npy`` format. For a description of the ``.npy`` format, see
+ in ``.npy`` format. For a description of the ``.npy`` format, see
:py:mod:`numpy.lib.format`.
@@ -831,7 +802,7 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
fname : file, str, or pathlib.Path
File, filename, or generator to read. If the filename extension is
``.gz`` or ``.bz2``, the file is first decompressed. Note that
- generators should return byte strings for Python 3k.
+ generators should return byte strings.
dtype : data-type, optional
Data-type of the resulting array; default: float. If this is a
structured data-type, the resulting array will be 1-dimensional, and
@@ -934,7 +905,7 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
"""
# Type conversions for Py3 convenience
if comments is not None:
- if isinstance(comments, (basestring, bytes)):
+ if isinstance(comments, (str, bytes)):
comments = [comments]
comments = [_decode_line(x) for x in comments]
# Compile regex for comments beforehand
@@ -1334,8 +1305,8 @@ def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
fmt = asstr(fmt)
delimiter = asstr(delimiter)
- class WriteWrap(object):
- """Convert to unicode in py2 or to bytes on bytestream inputs.
+ class WriteWrap:
+ """Convert to bytes on bytestream inputs.
"""
def __init__(self, fh, encoding):
@@ -1375,9 +1346,6 @@ def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
open(fname, 'wt').close()
fh = np.lib._datasource.open(fname, 'wt', encoding=encoding)
own_fh = True
- # need to convert str to unicode for text io output
- if sys.version_info[0] == 2:
- fh = WriteWrap(fh, encoding or 'latin1')
elif hasattr(fname, 'write'):
# wrap to handle byte output streams
fh = WriteWrap(fname, encoding or 'latin1')
@@ -1410,7 +1378,7 @@ def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
if len(fmt) != ncol:
raise AttributeError('fmt has wrong shape. %s' % str(fmt))
format = asstr(delimiter).join(map(asstr, fmt))
- elif isinstance(fmt, basestring):
+ elif isinstance(fmt, str):
n_fmt_chars = fmt.count('%')
error = ValueError('fmt has wrong number of %% formats: %s' % fmt)
if n_fmt_chars == 1:
@@ -1469,7 +1437,7 @@ def fromregex(file, regexp, dtype, encoding=None):
Parameters
----------
file : str or file
- File name or file object to read.
+ Filename or file object to read.
regexp : str or regexp
Regular expression used to parse the file.
Groups in the regular expression correspond to fields in the dtype.
@@ -1527,9 +1495,9 @@ def fromregex(file, regexp, dtype, encoding=None):
dtype = np.dtype(dtype)
content = file.read()
- if isinstance(content, bytes) and isinstance(regexp, np.unicode):
+ if isinstance(content, bytes) and isinstance(regexp, np.compat.unicode):
regexp = asbytes(regexp)
- elif isinstance(content, np.unicode) and isinstance(regexp, bytes):
+ elif isinstance(content, np.compat.unicode) and isinstance(regexp, bytes):
regexp = asstr(regexp)
if not hasattr(regexp, 'match'):
@@ -1576,7 +1544,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
fname : file, str, pathlib.Path, list of str, generator
File, filename, list, or generator to read. If the filename
extension is `.gz` or `.bz2`, the file is first decompressed. Note
- that generators must return byte strings in Python 3k. The strings
+ that generators must return byte strings. The strings
in a list or produced by a generator are treated as lines.
dtype : dtype, optional
Data type of the resulting array.
@@ -1766,7 +1734,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
try:
if isinstance(fname, os_PathLike):
fname = os_fspath(fname)
- if isinstance(fname, basestring):
+ if isinstance(fname, str):
fid = np.lib._datasource.open(fname, 'rt', encoding=encoding)
fid_ctx = contextlib.closing(fid)
else:
@@ -1908,7 +1876,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
if value not in entry:
entry.append(value)
# We have a string : apply it to all entries
- elif isinstance(user_missing_values, basestring):
+ elif isinstance(user_missing_values, str):
user_value = user_missing_values.split(",")
for entry in missing_values:
entry.extend(user_value)
diff --git a/numpy/lib/polynomial.py b/numpy/lib/polynomial.py
index 2c72f623c..5a0fa5431 100644
--- a/numpy/lib/polynomial.py
+++ b/numpy/lib/polynomial.py
@@ -2,8 +2,6 @@
Functions to operate on polynomials.
"""
-from __future__ import division, absolute_import, print_function
-
__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',
'polyfit', 'RankWarning']
@@ -479,10 +477,10 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
coefficients for `k`-th data set are in ``p[:,k]``.
residuals, rank, singular_values, rcond
- Present only if `full` = True. Residuals of the least-squares fit,
- the effective rank of the scaled Vandermonde coefficient matrix,
- its singular values, and the specified value of `rcond`. For more
- details, see `linalg.lstsq`.
+ Present only if `full` = True. Residuals is sum of squared residuals
+ of the least-squares fit, the effective rank of the scaled Vandermonde
+ coefficient matrix, its singular values, and the specified value of
+ `rcond`. For more details, see `linalg.lstsq`.
V : ndarray, shape (M,M) or (M,M,K)
Present only if `full` = False and `cov`=True. The covariance
@@ -1007,7 +1005,7 @@ def _raise_power(astr, wrap=70):
@set_module('numpy')
-class poly1d(object):
+class poly1d:
"""
A one-dimensional polynomial class.
diff --git a/numpy/lib/recfunctions.py b/numpy/lib/recfunctions.py
index 927161ddb..a11d5f2c7 100644
--- a/numpy/lib/recfunctions.py
+++ b/numpy/lib/recfunctions.py
@@ -5,9 +5,6 @@ Most of these functions were initially implemented by John Hunter for
matplotlib. They have been rewritten and extended for convenience.
"""
-from __future__ import division, absolute_import, print_function
-
-import sys
import itertools
import numpy as np
import numpy.ma as ma
@@ -16,12 +13,8 @@ from numpy.ma import MaskedArray
from numpy.ma.mrecords import MaskedRecords
from numpy.core.overrides import array_function_dispatch
from numpy.lib._iotools import _is_string_like
-from numpy.compat import basestring
from numpy.testing import suppress_warnings
-if sys.version_info[0] < 3:
- from future_builtins import zip
-
_check_fill_value = np.ma.core._check_fill_value
@@ -292,8 +285,7 @@ def _izip_fields_flat(iterable):
"""
for element in iterable:
if isinstance(element, np.void):
- for f in _izip_fields_flat(tuple(element)):
- yield f
+ yield from _izip_fields_flat(tuple(element))
else:
yield element
@@ -305,12 +297,11 @@ def _izip_fields(iterable):
"""
for element in iterable:
if (hasattr(element, '__iter__') and
- not isinstance(element, basestring)):
- for f in _izip_fields(element):
- yield f
+ not isinstance(element, str)):
+ yield from _izip_fields(element)
elif isinstance(element, np.void) and len(tuple(element)) == 1:
- for f in _izip_fields(element):
- yield f
+ # this statement is the same from the previous expression
+ yield from _izip_fields(element)
else:
yield element
@@ -335,12 +326,7 @@ def _izip_records(seqarrays, fill_value=None, flatten=True):
else:
zipfunc = _izip_fields
- if sys.version_info[0] >= 3:
- zip_longest = itertools.zip_longest
- else:
- zip_longest = itertools.izip_longest
-
- for tup in zip_longest(*seqarrays, fillvalue=fill_value):
+ for tup in itertools.zip_longest(*seqarrays, fillvalue=fill_value):
yield tuple(zipfunc(tup))
@@ -438,7 +424,7 @@ def merge_arrays(seqarrays, fill_value=-1, flatten=False,
if seqdtype.names is None:
seqdtype = np.dtype([('', seqdtype)])
if not flatten or _zip_dtype((seqarrays,), flatten=True) == seqdtype:
- # Minimal processing needed: just make sure everythng's a-ok
+ # Minimal processing needed: just make sure everything's a-ok
seqarrays = seqarrays.ravel()
# Find what type of array we must return
if usemask:
@@ -669,8 +655,7 @@ def rename_fields(base, namemapper):
def _append_fields_dispatcher(base, names, data, dtypes=None,
fill_value=None, usemask=None, asrecarray=None):
yield base
- for d in data:
- yield d
+ yield from data
@array_function_dispatch(_append_fields_dispatcher)
@@ -709,7 +694,7 @@ def append_fields(base, names, data, dtypes=None,
if len(names) != len(data):
msg = "The number of arrays does not match the number of names"
raise ValueError(msg)
- elif isinstance(names, basestring):
+ elif isinstance(names, str):
names = [names, ]
data = [data, ]
#
@@ -746,8 +731,7 @@ def append_fields(base, names, data, dtypes=None,
def _rec_append_fields_dispatcher(base, names, data, dtypes=None):
yield base
- for d in data:
- yield d
+ yield from data
@array_function_dispatch(_rec_append_fields_dispatcher)
@@ -1466,7 +1450,7 @@ def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
"'outer' or 'leftouter' (got '%s' instead)" % jointype
)
# If we have a single key, put it in a tuple
- if isinstance(key, basestring):
+ if isinstance(key, str):
key = (key,)
# Check the keys
diff --git a/numpy/lib/scimath.py b/numpy/lib/scimath.py
index 5ac790ce9..555a3d5a8 100644
--- a/numpy/lib/scimath.py
+++ b/numpy/lib/scimath.py
@@ -15,8 +15,6 @@ Similarly, `sqrt`, other base logarithms, `power` and trig functions are
correctly handled. See their respective docstrings for specific examples.
"""
-from __future__ import division, absolute_import, print_function
-
import numpy.core.numeric as nx
import numpy.core.numerictypes as nt
from numpy.core.numeric import asarray, any
diff --git a/numpy/lib/setup.py b/numpy/lib/setup.py
index d342410b8..5d0341d86 100644
--- a/numpy/lib/setup.py
+++ b/numpy/lib/setup.py
@@ -1,5 +1,3 @@
-from __future__ import division, print_function
-
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
diff --git a/numpy/lib/shape_base.py b/numpy/lib/shape_base.py
index 92d52109e..b7f1f16f2 100644
--- a/numpy/lib/shape_base.py
+++ b/numpy/lib/shape_base.py
@@ -1,7 +1,4 @@
-from __future__ import division, absolute_import, print_function
-
import functools
-import warnings
import numpy.core.numeric as _nx
from numpy.core.numeric import (
@@ -11,6 +8,7 @@ from numpy.core.fromnumeric import reshape, transpose
from numpy.core.multiarray import normalize_axis_index
from numpy.core import overrides
from numpy.core import vstack, atleast_3d
+from numpy.core.numeric import normalize_axis_tuple
from numpy.core.shape_base import _arrays_for_stack_dispatcher
from numpy.lib.index_tricks import ndindex
from numpy.matrixlib.defmatrix import matrix # this raises all the right alarm bells
@@ -29,7 +27,7 @@ array_function_dispatch = functools.partial(
def _make_along_axis_idx(arr_shape, indices, axis):
- # compute dimensions to iterate over
+ # compute dimensions to iterate over
if not _nx.issubdtype(indices.dtype, _nx.integer):
raise IndexError('`indices` must be an integer array')
if len(arr_shape) != indices.ndim:
@@ -271,8 +269,8 @@ def apply_along_axis(func1d, axis, arr, *args, **kwargs):
"""
Apply a function to 1-D slices along the given axis.
- Execute `func1d(a, *args)` where `func1d` operates on 1-D arrays and `a`
- is a 1-D slice of `arr` along `axis`.
+ Execute `func1d(a, *args, **kwargs)` where `func1d` operates on 1-D arrays
+ and `a` is a 1-D slice of `arr` along `axis`.
This is equivalent to (but faster than) the following use of `ndindex` and
`s_`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of indices::
@@ -517,22 +515,26 @@ def expand_dims(a, axis):
Insert a new axis that will appear at the `axis` position in the expanded
array shape.
- .. note:: Previous to NumPy 1.13.0, neither ``axis < -a.ndim - 1`` nor
- ``axis > a.ndim`` raised errors or put the new axis where documented.
- Those axis values are now deprecated and will raise an AxisError in the
- future.
-
Parameters
----------
a : array_like
Input array.
- axis : int
- Position in the expanded axes where the new axis is placed.
+ axis : int or tuple of ints
+ Position in the expanded axes where the new axis (or axes) is placed.
+
+ .. deprecated:: 1.13.0
+ Passing an axis where ``axis > a.ndim`` will be treated as
+ ``axis == a.ndim``, and passing ``axis < -a.ndim - 1`` will
+ be treated as ``axis == 0``. This behavior is deprecated.
+
+ .. versionchanged:: 1.18.0
+ A tuple of axes is now supported. Out of range axes as
+ described above are now forbidden and raise an `AxisError`.
Returns
-------
- res : ndarray
- View of `a` with the number of dimensions increased by one.
+ result : ndarray
+ View of `a` with the number of dimensions increased.
See Also
--------
@@ -542,11 +544,11 @@ def expand_dims(a, axis):
Examples
--------
- >>> x = np.array([1,2])
+ >>> x = np.array([1, 2])
>>> x.shape
(2,)
- The following is equivalent to ``x[np.newaxis,:]`` or ``x[np.newaxis]``:
+ The following is equivalent to ``x[np.newaxis, :]`` or ``x[np.newaxis]``:
>>> y = np.expand_dims(x, axis=0)
>>> y
@@ -554,13 +556,26 @@ def expand_dims(a, axis):
>>> y.shape
(1, 2)
- >>> y = np.expand_dims(x, axis=1) # Equivalent to x[:,np.newaxis]
+ The following is equivalent to ``x[:, np.newaxis]``:
+
+ >>> y = np.expand_dims(x, axis=1)
>>> y
array([[1],
[2]])
>>> y.shape
(2, 1)
+ ``axis`` may also be a tuple:
+
+ >>> y = np.expand_dims(x, axis=(0, 1))
+ >>> y
+ array([[[1, 2]]])
+
+ >>> y = np.expand_dims(x, axis=(2, 0))
+ >>> y
+ array([[[1],
+ [2]]])
+
Note that some examples may use ``None`` instead of ``np.newaxis``. These
are the same objects:
@@ -573,18 +588,16 @@ def expand_dims(a, axis):
else:
a = asanyarray(a)
- shape = a.shape
- if axis > a.ndim or axis < -a.ndim - 1:
- # 2017-05-17, 1.13.0
- warnings.warn("Both axis > a.ndim and axis < -a.ndim - 1 are "
- "deprecated and will raise an AxisError in the future.",
- DeprecationWarning, stacklevel=3)
- # When the deprecation period expires, delete this if block,
- if axis < 0:
- axis = axis + a.ndim + 1
- # and uncomment the following line.
- # axis = normalize_axis_index(axis, a.ndim + 1)
- return a.reshape(shape[:axis] + (1,) + shape[axis:])
+ if type(axis) not in (tuple, list):
+ axis = (axis,)
+
+ out_ndim = len(axis) + a.ndim
+ axis = normalize_axis_tuple(axis, out_ndim)
+
+ shape_it = iter(a.shape)
+ shape = [1 if ax in axis else next(shape_it) for ax in range(out_ndim)]
+
+ return a.reshape(shape)
row_stack = vstack
diff --git a/numpy/lib/stride_tricks.py b/numpy/lib/stride_tricks.py
index 8aafd094b..502235bdf 100644
--- a/numpy/lib/stride_tricks.py
+++ b/numpy/lib/stride_tricks.py
@@ -5,15 +5,13 @@ An explanation of strides can be found in the "ndarray.rst" file in the
NumPy reference guide.
"""
-from __future__ import division, absolute_import, print_function
-
import numpy as np
from numpy.core.overrides import array_function_dispatch
__all__ = ['broadcast_to', 'broadcast_arrays']
-class DummyArray(object):
+class DummyArray:
"""Dummy object that just exists to hang __array_interface__ dictionaries
and possibly keep alive a reference to a base array.
"""
@@ -199,12 +197,12 @@ def _broadcast_shape(*args):
return b.shape
-def _broadcast_arrays_dispatcher(*args, **kwargs):
+def _broadcast_arrays_dispatcher(*args, subok=None):
return args
@array_function_dispatch(_broadcast_arrays_dispatcher, module='numpy')
-def broadcast_arrays(*args, **kwargs):
+def broadcast_arrays(*args, subok=False):
"""
Broadcast any number of arrays against each other.
@@ -255,10 +253,6 @@ def broadcast_arrays(*args, **kwargs):
# return np.nditer(args, flags=['multi_index', 'zerosize_ok'],
# order='C').itviews
- subok = kwargs.pop('subok', False)
- if kwargs:
- raise TypeError('broadcast_arrays() got an unexpected keyword '
- 'argument {!r}'.format(list(kwargs.keys())[0]))
args = [np.array(_m, copy=False, subok=subok) for _m in args]
shape = _broadcast_shape(*args)
diff --git a/numpy/lib/tests/test__datasource.py b/numpy/lib/tests/test__datasource.py
index 8eac16b58..1ed7815d9 100644
--- a/numpy/lib/tests/test__datasource.py
+++ b/numpy/lib/tests/test__datasource.py
@@ -1,24 +1,14 @@
-from __future__ import division, absolute_import, print_function
-
import os
-import sys
import pytest
from tempfile import mkdtemp, mkstemp, NamedTemporaryFile
from shutil import rmtree
import numpy.lib._datasource as datasource
-from numpy.testing import (
- assert_, assert_equal, assert_raises, assert_warns
- )
+from numpy.testing import assert_, assert_equal, assert_raises
-if sys.version_info[0] >= 3:
- import urllib.request as urllib_request
- from urllib.parse import urlparse
- from urllib.error import URLError
-else:
- import urllib2 as urllib_request
- from urlparse import urlparse
- from urllib2 import URLError
+import urllib.request as urllib_request
+from urllib.parse import urlparse
+from urllib.error import URLError
def urlopen_stub(url, data=None):
@@ -96,7 +86,7 @@ def invalid_httpfile():
return http_fakefile
-class TestDataSourceOpen(object):
+class TestDataSourceOpen:
def setup(self):
self.tmpdir = mkdtemp()
self.ds = datasource.DataSource(self.tmpdir)
@@ -164,26 +154,8 @@ class TestDataSourceOpen(object):
fp.close()
assert_equal(magic_line, result)
- @pytest.mark.skipif(sys.version_info[0] >= 3, reason="Python 2 only")
- def test_Bz2File_text_mode_warning(self):
- try:
- import bz2
- except ImportError:
- # We don't have the bz2 capabilities to test.
- pytest.skip()
- # Test datasource's internal file_opener for BZip2 files.
- filepath = os.path.join(self.tmpdir, 'foobar.txt.bz2')
- fp = bz2.BZ2File(filepath, 'w')
- fp.write(magic_line)
- fp.close()
- with assert_warns(RuntimeWarning):
- fp = self.ds.open(filepath, 'rt')
- result = fp.readline()
- fp.close()
- assert_equal(magic_line, result)
-
-class TestDataSourceExists(object):
+class TestDataSourceExists:
def setup(self):
self.tmpdir = mkdtemp()
self.ds = datasource.DataSource(self.tmpdir)
@@ -213,7 +185,7 @@ class TestDataSourceExists(object):
assert_equal(self.ds.exists(tmpfile), False)
-class TestDataSourceAbspath(object):
+class TestDataSourceAbspath:
def setup(self):
self.tmpdir = os.path.abspath(mkdtemp())
self.ds = datasource.DataSource(self.tmpdir)
@@ -278,7 +250,7 @@ class TestDataSourceAbspath(object):
os.sep = orig_os_sep
-class TestRepositoryAbspath(object):
+class TestRepositoryAbspath:
def setup(self):
self.tmpdir = os.path.abspath(mkdtemp())
self.repos = datasource.Repository(valid_baseurl(), self.tmpdir)
@@ -311,7 +283,7 @@ class TestRepositoryAbspath(object):
os.sep = orig_os_sep
-class TestRepositoryExists(object):
+class TestRepositoryExists:
def setup(self):
self.tmpdir = mkdtemp()
self.repos = datasource.Repository(valid_baseurl(), self.tmpdir)
@@ -344,7 +316,7 @@ class TestRepositoryExists(object):
assert_(self.repos.exists(tmpfile))
-class TestOpenFunc(object):
+class TestOpenFunc:
def setup(self):
self.tmpdir = mkdtemp()
diff --git a/numpy/lib/tests/test__iotools.py b/numpy/lib/tests/test__iotools.py
index 15cd3ad9d..6964c1128 100644
--- a/numpy/lib/tests/test__iotools.py
+++ b/numpy/lib/tests/test__iotools.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import time
from datetime import date
@@ -11,10 +9,9 @@ from numpy.lib._iotools import (
LineSplitter, NameValidator, StringConverter,
has_nested_fields, easy_dtype, flatten_dtype
)
-from numpy.compat import unicode
-class TestLineSplitter(object):
+class TestLineSplitter:
"Tests the LineSplitter class."
def test_no_delimiter(self):
@@ -83,7 +80,7 @@ class TestLineSplitter(object):
# -----------------------------------------------------------------------------
-class TestNameValidator(object):
+class TestNameValidator:
def test_case_sensitivity(self):
"Test case sensitivity"
@@ -141,7 +138,7 @@ def _bytes_to_date(s):
return date(*time.strptime(s, "%Y-%m-%d")[:3])
-class TestStringConverter(object):
+class TestStringConverter:
"Test StringConverter"
def test_creation(self):
@@ -181,10 +178,10 @@ class TestStringConverter(object):
# note that the longdouble type has been skipped, so the
# _status increases by 2. Everything should succeed with
# unicode conversion (5).
- for s in ['a', u'a', b'a']:
+ for s in ['a', b'a']:
res = converter.upgrade(s)
- assert_(type(res) is unicode)
- assert_equal(res, u'a')
+ assert_(type(res) is str)
+ assert_equal(res, 'a')
assert_equal(converter._status, 5 + status_offset)
def test_missing(self):
@@ -266,7 +263,7 @@ class TestStringConverter(object):
assert_(converter(val) == 9223372043271415339)
-class TestMiscFunctions(object):
+class TestMiscFunctions:
def test_has_nested_dtype(self):
"Test has_nested_dtype"
diff --git a/numpy/lib/tests/test__version.py b/numpy/lib/tests/test__version.py
index 8e66a0c03..182504631 100644
--- a/numpy/lib/tests/test__version.py
+++ b/numpy/lib/tests/test__version.py
@@ -1,8 +1,6 @@
"""Tests for the NumpyVersion class.
"""
-from __future__ import division, absolute_import, print_function
-
from numpy.testing import assert_, assert_raises
from numpy.lib import NumpyVersion
diff --git a/numpy/lib/tests/test_arraypad.py b/numpy/lib/tests/test_arraypad.py
index 65593dd29..75db5928b 100644
--- a/numpy/lib/tests/test_arraypad.py
+++ b/numpy/lib/tests/test_arraypad.py
@@ -1,8 +1,6 @@
"""Tests for the array padding functions.
"""
-from __future__ import division, absolute_import, print_function
-
import pytest
import numpy as np
@@ -31,7 +29,7 @@ _all_modes = {
}
-class TestAsPairs(object):
+class TestAsPairs:
def test_single_value(self):
"""Test casting for a single value."""
expected = np.array([[3, 3]] * 10)
@@ -114,7 +112,7 @@ class TestAsPairs(object):
_as_pairs(np.ones((2, 3)), 3)
-class TestConditionalShortcuts(object):
+class TestConditionalShortcuts:
@pytest.mark.parametrize("mode", _all_modes.keys())
def test_zero_padding_shortcuts(self, mode):
test = np.arange(120).reshape(4, 5, 6)
@@ -136,7 +134,7 @@ class TestConditionalShortcuts(object):
np.pad(test, pad_amt, mode=mode, stat_length=30))
-class TestStatistic(object):
+class TestStatistic:
def test_check_mean_stat_length(self):
a = np.arange(100).astype('f')
a = np.pad(a, ((25, 20), ), 'mean', stat_length=((2, 3), ))
@@ -498,7 +496,7 @@ class TestStatistic(object):
np.pad([1., 2.], 1, mode, stat_length=(1, 0))
-class TestConstant(object):
+class TestConstant:
def test_check_constant(self):
a = np.arange(100)
a = np.pad(a, (25, 20), 'constant', constant_values=(10, 20))
@@ -677,7 +675,7 @@ class TestConstant(object):
assert result.shape == (3, 4, 4)
-class TestLinearRamp(object):
+class TestLinearRamp:
def test_check_simple(self):
a = np.arange(100).astype('f')
a = np.pad(a, (25, 20), 'linear_ramp', end_values=(4, 5))
@@ -762,7 +760,7 @@ class TestLinearRamp(object):
assert_equal(result, expected)
-class TestReflect(object):
+class TestReflect:
def test_check_simple(self):
a = np.arange(100)
a = np.pad(a, (25, 20), 'reflect')
@@ -872,7 +870,7 @@ class TestReflect(object):
assert_array_equal(a, b)
-class TestEmptyArray(object):
+class TestEmptyArray:
"""Check how padding behaves on arrays with an empty dimension."""
@pytest.mark.parametrize(
@@ -896,7 +894,7 @@ class TestEmptyArray(object):
assert result.shape == (8, 0, 4)
-class TestSymmetric(object):
+class TestSymmetric:
def test_check_simple(self):
a = np.arange(100)
a = np.pad(a, (25, 20), 'symmetric')
@@ -1030,7 +1028,7 @@ class TestSymmetric(object):
assert_array_equal(a, b)
-class TestWrap(object):
+class TestWrap:
def test_check_simple(self):
a = np.arange(100)
a = np.pad(a, (25, 20), 'wrap')
@@ -1144,7 +1142,7 @@ class TestWrap(object):
assert_array_equal(np.r_[a, a, a, a][:-3], b)
-class TestEdge(object):
+class TestEdge:
def test_check_simple(self):
a = np.arange(12)
a = np.reshape(a, (4, 3))
@@ -1183,7 +1181,7 @@ class TestEdge(object):
assert_array_equal(padded, expected)
-class TestEmpty(object):
+class TestEmpty:
def test_simple(self):
arr = np.arange(24).reshape(4, 6)
result = np.pad(arr, [(2, 3), (3, 1)], mode="empty")
@@ -1231,7 +1229,7 @@ def test_object_input(mode):
assert_array_equal(np.pad(a, pad_amt, mode=mode), b)
-class TestPadWidth(object):
+class TestPadWidth:
@pytest.mark.parametrize("pad_width", [
(4, 5, 6, 7),
((1,), (2,), (3,)),
@@ -1262,24 +1260,29 @@ class TestPadWidth(object):
with pytest.raises(ValueError, match=match):
np.pad(arr, pad_width, mode)
- @pytest.mark.parametrize("pad_width", [
- "3",
- "word",
- None,
- object(),
- 3.4,
- ((2, 3, 4), (3, 2)), # dtype=object (tuple)
- complex(1, -1),
- ((-2.1, 3), (3, 2)),
+ @pytest.mark.parametrize("pad_width, dtype", [
+ ("3", None),
+ ("word", None),
+ (None, None),
+ (object(), None),
+ (3.4, None),
+ (((2, 3, 4), (3, 2)), object),
+ (complex(1, -1), None),
+ (((-2.1, 3), (3, 2)), None),
])
@pytest.mark.parametrize("mode", _all_modes.keys())
- def test_bad_type(self, pad_width, mode):
+ def test_bad_type(self, pad_width, dtype, mode):
arr = np.arange(30).reshape((6, 5))
match = "`pad_width` must be of integral type."
- with pytest.raises(TypeError, match=match):
- np.pad(arr, pad_width, mode)
- with pytest.raises(TypeError, match=match):
- np.pad(arr, np.array(pad_width), mode)
+ if dtype is not None:
+ # avoid DeprecationWarning when not specifying dtype
+ with pytest.raises(TypeError, match=match):
+ np.pad(arr, np.array(pad_width, dtype=dtype), mode)
+ else:
+ with pytest.raises(TypeError, match=match):
+ np.pad(arr, pad_width, mode)
+ with pytest.raises(TypeError, match=match):
+ np.pad(arr, np.array(pad_width), mode)
def test_pad_width_as_ndarray(self):
a = np.arange(12)
diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py
index fd21a7f76..81ba789e3 100644
--- a/numpy/lib/tests/test_arraysetops.py
+++ b/numpy/lib/tests/test_arraysetops.py
@@ -1,8 +1,6 @@
"""Test functions for 1D array set operations.
"""
-from __future__ import division, absolute_import, print_function
-
import numpy as np
from numpy.testing import (assert_array_equal, assert_equal,
@@ -13,8 +11,7 @@ from numpy.lib.arraysetops import (
import pytest
-
-class TestSetOps(object):
+class TestSetOps:
def test_intersect1d(self):
# unique inputs
@@ -36,7 +33,7 @@ class TestSetOps(object):
def test_intersect1d_array_like(self):
# See gh-11772
- class Test(object):
+ class Test:
def __array__(self):
return np.arange(3)
@@ -120,12 +117,13 @@ class TestSetOps(object):
assert_array_equal([-1, 0], ediff1d(zero_elem, to_begin=-1, to_end=0))
assert_array_equal([], ediff1d(one_elem))
assert_array_equal([1], ediff1d(two_elem))
- assert_array_equal([7,1,9], ediff1d(two_elem, to_begin=7, to_end=9))
- assert_array_equal([5,6,1,7,8], ediff1d(two_elem, to_begin=[5,6], to_end=[7,8]))
- assert_array_equal([1,9], ediff1d(two_elem, to_end=9))
- assert_array_equal([1,7,8], ediff1d(two_elem, to_end=[7,8]))
- assert_array_equal([7,1], ediff1d(two_elem, to_begin=7))
- assert_array_equal([5,6,1], ediff1d(two_elem, to_begin=[5,6]))
+ assert_array_equal([7, 1, 9], ediff1d(two_elem, to_begin=7, to_end=9))
+ assert_array_equal([5, 6, 1, 7, 8],
+ ediff1d(two_elem, to_begin=[5, 6], to_end=[7, 8]))
+ assert_array_equal([1, 9], ediff1d(two_elem, to_end=9))
+ assert_array_equal([1, 7, 8], ediff1d(two_elem, to_end=[7, 8]))
+ assert_array_equal([7, 1], ediff1d(two_elem, to_begin=7))
+ assert_array_equal([5, 6, 1], ediff1d(two_elem, to_begin=[5, 6]))
@pytest.mark.parametrize("ary, prepend, append", [
# should fail because trying to cast
@@ -135,9 +133,9 @@ class TestSetOps(object):
None,
np.nan),
# should fail because attempting
- # to downcast to smaller int type:
- (np.array([1, 2, 3], dtype=np.int16),
- np.array([5, 1<<20, 2], dtype=np.int32),
+ # to downcast to int type:
+ (np.array([1, 2, 3], dtype=np.int64),
+ np.array([5, 7, 2], dtype=np.float32),
None),
# should fail because attempting to cast
# two special floating point values
@@ -152,29 +150,33 @@ class TestSetOps(object):
# specifically, raise an appropriate
# Exception when attempting to append or
# prepend with an incompatible type
- msg = 'cannot convert'
- with assert_raises_regex(ValueError, msg):
+ msg = 'must be compatible'
+ with assert_raises_regex(TypeError, msg):
ediff1d(ary=ary,
to_end=append,
to_begin=prepend)
- @pytest.mark.parametrize("ary,"
- "prepend,"
- "append,"
- "expected", [
- (np.array([1, 2, 3], dtype=np.int16),
- 0,
- None,
- np.array([0, 1, 1], dtype=np.int16)),
- (np.array([1, 2, 3], dtype=np.int32),
- 0,
- 0,
- np.array([0, 1, 1, 0], dtype=np.int32)),
- (np.array([1, 2, 3], dtype=np.int64),
- 3,
- -9,
- np.array([3, 1, 1, -9], dtype=np.int64)),
- ])
+ @pytest.mark.parametrize(
+ "ary,prepend,append,expected",
+ [
+ (np.array([1, 2, 3], dtype=np.int16),
+ 2**16, # will be cast to int16 under same kind rule.
+ 2**16 + 4,
+ np.array([0, 1, 1, 4], dtype=np.int16)),
+ (np.array([1, 2, 3], dtype=np.float32),
+ np.array([5], dtype=np.float64),
+ None,
+ np.array([5, 1, 1], dtype=np.float32)),
+ (np.array([1, 2, 3], dtype=np.int32),
+ 0,
+ 0,
+ np.array([0, 1, 1, 0], dtype=np.int32)),
+ (np.array([1, 2, 3], dtype=np.int64),
+ 3,
+ -9,
+ np.array([3, 1, 1, -9], dtype=np.int64)),
+ ]
+ )
def test_ediff1d_scalar_handling(self,
ary,
prepend,
@@ -187,7 +189,7 @@ class TestSetOps(object):
to_end=append,
to_begin=prepend)
assert_equal(actual, expected)
-
+ assert actual.dtype == expected.dtype
def test_isin(self):
# the tests for in1d cover most of isin's behavior
@@ -197,33 +199,34 @@ class TestSetOps(object):
b = np.asarray(b).flatten().tolist()
return a in b
isin_slow = np.vectorize(_isin_slow, otypes=[bool], excluded={1})
+
def assert_isin_equal(a, b):
x = isin(a, b)
y = isin_slow(a, b)
assert_array_equal(x, y)
- #multidimensional arrays in both arguments
+ # multidimensional arrays in both arguments
a = np.arange(24).reshape([2, 3, 4])
b = np.array([[10, 20, 30], [0, 1, 3], [11, 22, 33]])
assert_isin_equal(a, b)
- #array-likes as both arguments
+ # array-likes as both arguments
c = [(9, 8), (7, 6)]
d = (9, 7)
assert_isin_equal(c, d)
- #zero-d array:
+ # zero-d array:
f = np.array(3)
assert_isin_equal(f, b)
assert_isin_equal(a, f)
assert_isin_equal(f, f)
- #scalar:
+ # scalar:
assert_isin_equal(5, b)
assert_isin_equal(a, 6)
assert_isin_equal(5, 6)
- #empty array-like:
+ # empty array-like:
x = []
assert_isin_equal(x, b)
assert_isin_equal(a, x)
@@ -410,7 +413,7 @@ class TestSetOps(object):
assert_array_equal(c1, c2)
-class TestUnique(object):
+class TestUnique:
def test_unique_1d(self):
@@ -517,7 +520,8 @@ class TestUnique(object):
a = []
a1_idx = np.unique(a, return_index=True)[1]
a2_inv = np.unique(a, return_inverse=True)[1]
- a3_idx, a3_inv = np.unique(a, return_index=True, return_inverse=True)[1:]
+ a3_idx, a3_inv = np.unique(a, return_index=True,
+ return_inverse=True)[1:]
assert_equal(a1_idx.dtype, np.intp)
assert_equal(a2_inv.dtype, np.intp)
assert_equal(a3_idx.dtype, np.intp)
@@ -560,9 +564,52 @@ class TestUnique(object):
result = np.array([[-0.0, 0.0]])
assert_array_equal(unique(data, axis=0), result, msg)
+ @pytest.mark.parametrize("axis", [0, -1])
+ def test_unique_1d_with_axis(self, axis):
+ x = np.array([4, 3, 2, 3, 2, 1, 2, 2])
+ uniq = unique(x, axis=axis)
+ assert_array_equal(uniq, [1, 2, 3, 4])
+
+ def test_unique_axis_zeros(self):
+ # issue 15559
+ single_zero = np.empty(shape=(2, 0), dtype=np.int8)
+ uniq, idx, inv, cnt = unique(single_zero, axis=0, return_index=True,
+ return_inverse=True, return_counts=True)
+
+ # there's 1 element of shape (0,) along axis 0
+ assert_equal(uniq.dtype, single_zero.dtype)
+ assert_array_equal(uniq, np.empty(shape=(1, 0)))
+ assert_array_equal(idx, np.array([0]))
+ assert_array_equal(inv, np.array([0, 0]))
+ assert_array_equal(cnt, np.array([2]))
+
+ # there's 0 elements of shape (2,) along axis 1
+ uniq, idx, inv, cnt = unique(single_zero, axis=1, return_index=True,
+ return_inverse=True, return_counts=True)
+
+ assert_equal(uniq.dtype, single_zero.dtype)
+ assert_array_equal(uniq, np.empty(shape=(2, 0)))
+ assert_array_equal(idx, np.array([]))
+ assert_array_equal(inv, np.array([]))
+ assert_array_equal(cnt, np.array([]))
+
+ # test a "complicated" shape
+ shape = (0, 2, 0, 3, 0, 4, 0)
+ multiple_zeros = np.empty(shape=shape)
+ for axis in range(len(shape)):
+ expected_shape = list(shape)
+ if shape[axis] == 0:
+ expected_shape[axis] = 0
+ else:
+ expected_shape[axis] = 1
+
+ assert_array_equal(unique(multiple_zeros, axis=axis),
+ np.empty(shape=expected_shape))
+
def test_unique_masked(self):
# issue 8664
- x = np.array([64, 0, 1, 2, 3, 63, 63, 0, 0, 0, 1, 2, 0, 63, 0], dtype='uint8')
+ x = np.array([64, 0, 1, 2, 3, 63, 63, 0, 0, 0, 1, 2, 0, 63, 0],
+ dtype='uint8')
y = np.ma.masked_equal(x, 0)
v = np.unique(y)
@@ -577,7 +624,7 @@ class TestUnique(object):
# as unsigned byte strings. See gh-10495.
fmt = "sort order incorrect for integer type '%s'"
for dt in 'bhilq':
- a = np.array([[-1],[0]], dt)
+ a = np.array([[-1], [0]], dt)
b = np.unique(a, axis=0)
assert_array_equal(a, b, fmt % dt)
diff --git a/numpy/lib/tests/test_arrayterator.py b/numpy/lib/tests/test_arrayterator.py
index 2ce4456a5..c00ed13d7 100644
--- a/numpy/lib/tests/test_arrayterator.py
+++ b/numpy/lib/tests/test_arrayterator.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
from operator import mul
from functools import reduce
diff --git a/numpy/lib/tests/test_financial.py b/numpy/lib/tests/test_financial.py
index 21088765f..26e79bc06 100644
--- a/numpy/lib/tests/test_financial.py
+++ b/numpy/lib/tests/test_financial.py
@@ -1,5 +1,4 @@
-from __future__ import division, absolute_import, print_function
-
+import warnings
from decimal import Decimal
import numpy as np
@@ -8,22 +7,35 @@ from numpy.testing import (
)
-class TestFinancial(object):
+def filter_deprecation(func):
+ def newfunc(*args, **kwargs):
+ with warnings.catch_warnings(record=True) as ws:
+ warnings.filterwarnings('always', category=DeprecationWarning)
+ func(*args, **kwargs)
+ assert_(all(w.category is DeprecationWarning for w in ws))
+ return newfunc
+
+
+class TestFinancial:
+ @filter_deprecation
def test_npv_irr_congruence(self):
        # IRR is defined as the rate required for the present value of a
        # series of cashflows to be zero i.e. NPV(IRR(x), x) = 0
cashflows = np.array([-40000, 5000, 8000, 12000, 30000])
assert_allclose(np.npv(np.irr(cashflows), cashflows), 0, atol=1e-10, rtol=0)
+ @filter_deprecation
def test_rate(self):
assert_almost_equal(
np.rate(10, 0, -3500, 10000),
0.1107, 4)
+ @filter_deprecation
def test_rate_decimal(self):
rate = np.rate(Decimal('10'), Decimal('0'), Decimal('-3500'), Decimal('10000'))
assert_equal(Decimal('0.1106908537142689284704528100'), rate)
+ @filter_deprecation
def test_irr(self):
v = [-150000, 15000, 25000, 35000, 45000, 60000]
assert_almost_equal(np.irr(v), 0.0524, 2)
@@ -43,20 +55,25 @@ class TestFinancial(object):
v = [-1, -2, -3]
assert_equal(np.irr(v), np.nan)
+ @filter_deprecation
def test_pv(self):
assert_almost_equal(np.pv(0.07, 20, 12000, 0), -127128.17, 2)
+ @filter_deprecation
def test_pv_decimal(self):
assert_equal(np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0')),
Decimal('-127128.1709461939327295222005'))
+ @filter_deprecation
def test_fv(self):
assert_equal(np.fv(0.075, 20, -2000, 0, 0), 86609.362673042924)
+ @filter_deprecation
def test_fv_decimal(self):
assert_equal(np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), 0, 0),
Decimal('86609.36267304300040536731624'))
+ @filter_deprecation
def test_pmt(self):
res = np.pmt(0.08 / 12, 5 * 12, 15000)
tgt = -304.145914
@@ -71,6 +88,7 @@ class TestFinancial(object):
tgt = np.array([[-166.66667, -19311.258], [-626.90814, -19311.258]])
assert_allclose(res, tgt)
+ @filter_deprecation
def test_pmt_decimal(self):
res = np.pmt(Decimal('0.08') / Decimal('12'), 5 * 12, 15000)
tgt = Decimal('-304.1459143262052370338701494')
@@ -94,18 +112,22 @@ class TestFinancial(object):
assert_equal(res[1][0], tgt[1][0])
assert_equal(res[1][1], tgt[1][1])
+ @filter_deprecation
def test_ppmt(self):
assert_equal(np.round(np.ppmt(0.1 / 12, 1, 60, 55000), 2), -710.25)
+ @filter_deprecation
def test_ppmt_decimal(self):
assert_equal(np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000')),
Decimal('-710.2541257864217612489830917'))
# Two tests showing how Decimal is actually getting at a more exact result
# .23 / 12 does not come out nicely as a float but does as a decimal
+ @filter_deprecation
def test_ppmt_special_rate(self):
assert_equal(np.round(np.ppmt(0.23 / 12, 1, 60, 10000000000), 8), -90238044.232277036)
+ @filter_deprecation
def test_ppmt_special_rate_decimal(self):
# When rounded out to 8 decimal places like the float based test, this should not equal the same value
# as the float, substituted for the decimal
@@ -118,31 +140,38 @@ class TestFinancial(object):
assert_equal(np.ppmt(Decimal('0.23') / Decimal('12'), 1, 60, Decimal('10000000000')),
Decimal('-90238044.2322778884413969909'))
+ @filter_deprecation
def test_ipmt(self):
assert_almost_equal(np.round(np.ipmt(0.1 / 12, 1, 24, 2000), 2), -16.67)
+ @filter_deprecation
def test_ipmt_decimal(self):
result = np.ipmt(Decimal('0.1') / Decimal('12'), 1, 24, 2000)
assert_equal(result.flat[0], Decimal('-16.66666666666666666666666667'))
+ @filter_deprecation
def test_nper(self):
assert_almost_equal(np.nper(0.075, -2000, 0, 100000.),
21.54, 2)
+ @filter_deprecation
def test_nper2(self):
assert_almost_equal(np.nper(0.0, -2000, 0, 100000.),
50.0, 1)
+ @filter_deprecation
def test_npv(self):
assert_almost_equal(
np.npv(0.05, [-15000, 1500, 2500, 3500, 4500, 6000]),
122.89, 2)
+ @filter_deprecation
def test_npv_decimal(self):
assert_equal(
np.npv(Decimal('0.05'), [-15000, 1500, 2500, 3500, 4500, 6000]),
Decimal('122.894854950942692161628715'))
+ @filter_deprecation
def test_mirr(self):
val = [-4500, -800, 800, 800, 600, 600, 800, 800, 700, 3000]
assert_almost_equal(np.mirr(val, 0.08, 0.055), 0.0666, 4)
@@ -156,6 +185,7 @@ class TestFinancial(object):
val = [39000, 30000, 21000, 37000, 46000]
assert_(np.isnan(np.mirr(val, 0.10, 0.12)))
+ @filter_deprecation
def test_mirr_decimal(self):
val = [Decimal('-4500'), Decimal('-800'), Decimal('800'), Decimal('800'),
Decimal('600'), Decimal('600'), Decimal('800'), Decimal('800'),
@@ -174,6 +204,7 @@ class TestFinancial(object):
val = [Decimal('39000'), Decimal('30000'), Decimal('21000'), Decimal('37000'), Decimal('46000')]
assert_(np.isnan(np.mirr(val, Decimal('0.10'), Decimal('0.12'))))
+ @filter_deprecation
def test_when(self):
# begin
assert_equal(np.rate(10, 20, -3500, 10000, 1),
@@ -238,6 +269,7 @@ class TestFinancial(object):
assert_equal(np.nper(0.075, -2000, 0, 100000., 0),
np.nper(0.075, -2000, 0, 100000., 'end'))
+ @filter_deprecation
def test_decimal_with_when(self):
"""Test that decimals are still supported if the when argument is passed"""
# begin
@@ -312,6 +344,7 @@ class TestFinancial(object):
np.ipmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('24'), Decimal('2000'),
Decimal('0'), 'end').flat[0])
+ @filter_deprecation
def test_broadcast(self):
assert_almost_equal(np.nper(0.075, -2000, 0, 100000., [0, 1]),
[21.5449442, 20.76156441], 4)
@@ -329,6 +362,7 @@ class TestFinancial(object):
[-74.998201, -75.62318601, -75.62318601,
-76.88882405, -76.88882405], 4)
+ @filter_deprecation
def test_broadcast_decimal(self):
         # Use almost equal because precision is tested in the explicit tests;
         # this test is to ensure broadcast with Decimal is not broken.
diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py
index 062c21725..2dbaeb8cb 100644
--- a/numpy/lib/tests/test_format.py
+++ b/numpy/lib/tests/test_format.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
# doctest
r''' Test the .npy file format.
@@ -537,8 +535,10 @@ dt4 = np.dtype({'names': ['a', '', 'b'], 'formats': ['i4']*3})
# titles
dt5 = np.dtype({'names': ['a', 'b'], 'formats': ['i4', 'i4'],
'offsets': [1, 6], 'titles': ['aa', 'bb']})
+# empty
+dt6 = np.dtype({'names': [], 'formats': [], 'itemsize': 8})
-@pytest.mark.parametrize("dt", [dt1, dt2, dt3, dt4, dt5])
+@pytest.mark.parametrize("dt", [dt1, dt2, dt3, dt4, dt5, dt6])
def test_load_padded_dtype(dt):
arr = np.zeros(3, dt)
for i in range(3):
@@ -550,10 +550,7 @@ def test_load_padded_dtype(dt):
def test_python2_python3_interoperability():
- if sys.version_info[0] >= 3:
- fname = 'win64python2.npy'
- else:
- fname = 'python3.npy'
+ fname = 'win64python2.npy'
path = os.path.join(os.path.dirname(__file__), 'data', fname)
data = np.load(path)
assert_array_equal(data, np.ones(2))
@@ -563,13 +560,7 @@ def test_pickle_python2_python3():
# Python 2 and Python 3 and vice versa
data_dir = os.path.join(os.path.dirname(__file__), 'data')
- if sys.version_info[0] >= 3:
- xrange = range
- else:
- import __builtin__
- xrange = __builtin__.xrange
-
- expected = np.array([None, xrange, u'\u512a\u826f',
+ expected = np.array([None, range, u'\u512a\u826f',
b'\xe4\xb8\x8d\xe8\x89\xaf'],
dtype=object)
@@ -585,34 +576,30 @@ def test_pickle_python2_python3():
else:
data = data_f
- if sys.version_info[0] >= 3:
- if encoding == 'latin1' and fname.startswith('py2'):
- assert_(isinstance(data[3], str))
- assert_array_equal(data[:-1], expected[:-1])
- # mojibake occurs
- assert_array_equal(data[-1].encode(encoding), expected[-1])
- else:
- assert_(isinstance(data[3], bytes))
- assert_array_equal(data, expected)
+ if encoding == 'latin1' and fname.startswith('py2'):
+ assert_(isinstance(data[3], str))
+ assert_array_equal(data[:-1], expected[:-1])
+ # mojibake occurs
+ assert_array_equal(data[-1].encode(encoding), expected[-1])
else:
+ assert_(isinstance(data[3], bytes))
assert_array_equal(data, expected)
- if sys.version_info[0] >= 3:
- if fname.startswith('py2'):
- if fname.endswith('.npz'):
- data = np.load(path, allow_pickle=True)
- assert_raises(UnicodeError, data.__getitem__, 'x')
- data.close()
- data = np.load(path, allow_pickle=True, fix_imports=False,
- encoding='latin1')
- assert_raises(ImportError, data.__getitem__, 'x')
- data.close()
- else:
- assert_raises(UnicodeError, np.load, path,
- allow_pickle=True)
- assert_raises(ImportError, np.load, path,
- allow_pickle=True, fix_imports=False,
- encoding='latin1')
+ if fname.startswith('py2'):
+ if fname.endswith('.npz'):
+ data = np.load(path, allow_pickle=True)
+ assert_raises(UnicodeError, data.__getitem__, 'x')
+ data.close()
+ data = np.load(path, allow_pickle=True, fix_imports=False,
+ encoding='latin1')
+ assert_raises(ImportError, data.__getitem__, 'x')
+ data.close()
+ else:
+ assert_raises(UnicodeError, np.load, path,
+ allow_pickle=True)
+ assert_raises(ImportError, np.load, path,
+ allow_pickle=True, fix_imports=False,
+ encoding='latin1')
def test_pickle_disallow():
@@ -963,3 +950,33 @@ def test_unicode_field_names():
with open(fname, 'wb') as f:
with assert_warns(UserWarning):
format.write_array(f, arr, version=None)
+
+
+@pytest.mark.parametrize('dt, fail', [
+ (np.dtype({'names': ['a', 'b'], 'formats': [float, np.dtype('S3',
+ metadata={'some': 'stuff'})]}), True),
+ (np.dtype(int, metadata={'some': 'stuff'}), False),
+ (np.dtype([('subarray', (int, (2,)))], metadata={'some': 'stuff'}), False),
+ # recursive: metadata on the field of a dtype
+ (np.dtype({'names': ['a', 'b'], 'formats': [
+ float, np.dtype({'names': ['c'], 'formats': [np.dtype(int, metadata={})]})
+ ]}), False)
+ ])
+def test_metadata_dtype(dt, fail):
+ # gh-14142
+ arr = np.ones(10, dtype=dt)
+ buf = BytesIO()
+ with assert_warns(UserWarning):
+ np.save(buf, arr)
+ buf.seek(0)
+ if fail:
+ with assert_raises(ValueError):
+ np.load(buf)
+ else:
+ arr2 = np.load(buf)
+ # BUG: assert_array_equal does not check metadata
+ from numpy.lib.format import _has_metadata
+ assert_array_equal(arr, arr2)
+ assert _has_metadata(arr.dtype)
+ assert not _has_metadata(arr2.dtype)
+
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index 1eae8ccfb..23bf3296d 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -1,10 +1,7 @@
-from __future__ import division, absolute_import, print_function
-
import operator
import warnings
import sys
import decimal
-import types
from fractions import Fraction
import pytest
@@ -24,9 +21,6 @@ from numpy.lib import (
select, setxor1d, sinc, trapz, trim_zeros, unwrap, unique, vectorize
)
-from numpy.compat import long
-
-PY2 = sys.version_info[0] == 2
def get_mat(n):
data = np.arange(n)
@@ -45,7 +39,7 @@ def _make_complex(real, imag):
return ret
-class TestRot90(object):
+class TestRot90:
def test_basic(self):
assert_raises(ValueError, rot90, np.ones(4))
assert_raises(ValueError, rot90, np.ones((2,2,2)), axes=(0,1,2))
@@ -113,7 +107,7 @@ class TestRot90(object):
rot90(a_rot90_20, k=k-1, axes=(2, 0)))
-class TestFlip(object):
+class TestFlip:
def test_axes(self):
assert_raises(np.AxisError, np.flip, np.ones(4), axis=1)
@@ -216,7 +210,7 @@ class TestFlip(object):
assert_equal(np.flip(a, axis=(1, 2)), c)
-class TestAny(object):
+class TestAny:
def test_basic(self):
y1 = [0, 0, 1, 0]
@@ -233,7 +227,7 @@ class TestAny(object):
assert_array_equal(np.sometrue(y1, axis=1), [0, 1, 1])
-class TestAll(object):
+class TestAll:
def test_basic(self):
y1 = [0, 1, 1, 0]
@@ -251,7 +245,7 @@ class TestAll(object):
assert_array_equal(np.alltrue(y1, axis=1), [0, 0, 1])
-class TestCopy(object):
+class TestCopy:
def test_basic(self):
a = np.array([[1, 2], [3, 4]])
@@ -278,8 +272,15 @@ class TestCopy(object):
assert_(not a_fort_copy.flags.c_contiguous)
assert_(a_fort_copy.flags.f_contiguous)
+ def test_subok(self):
+ mx = ma.ones(5)
+ assert_(not ma.isMaskedArray(np.copy(mx, subok=False)))
+ assert_(ma.isMaskedArray(np.copy(mx, subok=True)))
+ # Default behavior
+ assert_(not ma.isMaskedArray(np.copy(mx)))
-class TestAverage(object):
+
+class TestAverage:
def test_basic(self):
y1 = np.array([1, 2, 3])
@@ -380,7 +381,7 @@ class TestAverage(object):
w /= w.sum()
assert_almost_equal(a.mean(0), average(a, weights=w))
-class TestSelect(object):
+class TestSelect:
choices = [np.array([1, 2, 3]),
np.array([4, 5, 6]),
np.array([7, 8, 9])]
@@ -442,7 +443,7 @@ class TestSelect(object):
select(conditions, choices)
-class TestInsert(object):
+class TestInsert:
def test_basic(self):
a = [1, 2, 3]
@@ -507,12 +508,11 @@ class TestInsert(object):
insert(a, 1, a[:, 2, :], axis=1))
def test_0d(self):
- # This is an error in the future
a = np.array(1)
- with warnings.catch_warnings(record=True) as w:
- warnings.filterwarnings('always', '', DeprecationWarning)
- assert_equal(insert(a, [], 2, axis=0), np.array(2))
- assert_(w[0].category is DeprecationWarning)
+ with pytest.raises(np.AxisError):
+ insert(a, [], 2, axis=0)
+ with pytest.raises(TypeError):
+ insert(a, [], 2, axis="nonsense")
def test_subclass(self):
class SubClass(np.ndarray):
@@ -542,8 +542,14 @@ class TestInsert(object):
b = np.insert(a, [0, 2], val)
assert_array_equal(b[[0, 3]], np.array(val, dtype=b.dtype))
+ def test_index_floats(self):
+ with pytest.raises(IndexError):
+ np.insert([0, 1, 2], np.array([1.0, 2.0]), [10, 20])
+ with pytest.raises(IndexError):
+ np.insert([0, 1, 2], np.array([], dtype=float), [])
-class TestAmax(object):
+
+class TestAmax:
def test_basic(self):
a = [3, 4, 5, 10, -3, -5, 6.0]
@@ -555,7 +561,7 @@ class TestAmax(object):
assert_equal(np.amax(b, axis=1), [9.0, 10.0, 8.0])
-class TestAmin(object):
+class TestAmin:
def test_basic(self):
a = [3, 4, 5, 10, -3, -5, 6.0]
@@ -567,7 +573,7 @@ class TestAmin(object):
assert_equal(np.amin(b, axis=1), [3.0, 4.0, 2.0])
-class TestPtp(object):
+class TestPtp:
def test_basic(self):
a = np.array([3, 4, 5, 10, -3, -5, 6.0])
@@ -582,7 +588,7 @@ class TestPtp(object):
assert_equal(b.ptp(axis=(0,1), keepdims=True), [[8.0]])
-class TestCumsum(object):
+class TestCumsum:
def test_basic(self):
ba = [1, 2, 10, 11, 6, 5, 4]
@@ -605,7 +611,7 @@ class TestCumsum(object):
assert_array_equal(np.cumsum(a2, axis=1), tgt)
-class TestProd(object):
+class TestProd:
def test_basic(self):
ba = [1, 2, 10, 11, 6, 5, 4]
@@ -625,7 +631,7 @@ class TestProd(object):
np.array([24, 1890, 600], ctype))
-class TestCumprod(object):
+class TestCumprod:
def test_basic(self):
ba = [1, 2, 10, 11, 6, 5, 4]
@@ -652,7 +658,7 @@ class TestCumprod(object):
[10, 30, 120, 600]], ctype))
-class TestDiff(object):
+class TestDiff:
def test_basic(self):
x = [1, 4, 6, 7, 12]
@@ -792,7 +798,7 @@ class TestDiff(object):
assert_raises(np.AxisError, diff, x, append=0, axis=3)
-class TestDelete(object):
+class TestDelete:
def setup(self):
self.a = np.arange(5)
@@ -802,10 +808,6 @@ class TestDelete(object):
a_del = delete(self.a, indices)
nd_a_del = delete(self.nd_a, indices, axis=1)
msg = 'Delete failed for obj: %r' % indices
- # NOTE: The cast should be removed after warning phase for bools
- if not isinstance(indices, (slice, int, long, np.integer)):
- indices = np.asarray(indices, dtype=np.intp)
- indices = indices[(indices >= 0) & (indices < 5)]
assert_array_equal(setxor1d(a_del, self.a[indices, ]), self.a,
err_msg=msg)
xor = setxor1d(nd_a_del[0,:, 0], self.nd_a[0, indices, 0])
@@ -821,19 +823,25 @@ class TestDelete(object):
self._check_inverse_of_slicing(s)
def test_fancy(self):
- # Deprecation/FutureWarning tests should be kept after change.
self._check_inverse_of_slicing(np.array([[0, 1], [2, 1]]))
- with warnings.catch_warnings():
- warnings.filterwarnings('error', category=DeprecationWarning)
- assert_raises(DeprecationWarning, delete, self.a, [100])
- assert_raises(DeprecationWarning, delete, self.a, [-100])
- with warnings.catch_warnings(record=True) as w:
- warnings.filterwarnings('always', category=FutureWarning)
- self._check_inverse_of_slicing([0, -1, 2, 2])
- obj = np.array([True, False, False], dtype=bool)
- self._check_inverse_of_slicing(obj)
- assert_(w[0].category is FutureWarning)
- assert_(w[1].category is FutureWarning)
+ with pytest.raises(IndexError):
+ delete(self.a, [100])
+ with pytest.raises(IndexError):
+ delete(self.a, [-100])
+
+ self._check_inverse_of_slicing([0, -1, 2, 2])
+
+ self._check_inverse_of_slicing([True, False, False, True, False])
+
+ # not legal, indexing with these would change the dimension
+ with pytest.raises(ValueError):
+ delete(self.a, True)
+ with pytest.raises(ValueError):
+ delete(self.a, False)
+
+ # not enough items
+ with pytest.raises(ValueError):
+ delete(self.a, [False]*4)
def test_single(self):
self._check_inverse_of_slicing(0)
@@ -841,10 +849,10 @@ class TestDelete(object):
def test_0d(self):
a = np.array(1)
- with warnings.catch_warnings(record=True) as w:
- warnings.filterwarnings('always', '', DeprecationWarning)
- assert_equal(delete(a, [], axis=0), a)
- assert_(w[0].category is DeprecationWarning)
+ with pytest.raises(np.AxisError):
+ delete(a, [], axis=0)
+ with pytest.raises(TypeError):
+ delete(a, [], axis="nonsense")
def test_subclass(self):
class SubClass(np.ndarray):
@@ -866,8 +874,14 @@ class TestDelete(object):
assert_equal(m.flags.c_contiguous, k.flags.c_contiguous)
assert_equal(m.flags.f_contiguous, k.flags.f_contiguous)
+ def test_index_floats(self):
+ with pytest.raises(IndexError):
+ np.delete([0, 1, 2], np.array([1.0, 2.0]))
+ with pytest.raises(IndexError):
+ np.delete([0, 1, 2], np.array([], dtype=float))
-class TestGradient(object):
+
+class TestGradient:
def test_basic(self):
v = [[1, 1], [3, 4]]
@@ -1084,8 +1098,42 @@ class TestGradient(object):
assert_raises(ValueError, gradient, np.arange(1), edge_order=2)
assert_raises(ValueError, gradient, np.arange(2), edge_order=2)
-
-class TestAngle(object):
+ @pytest.mark.parametrize('f_dtype', [np.uint8, np.uint16,
+ np.uint32, np.uint64])
+ def test_f_decreasing_unsigned_int(self, f_dtype):
+ f = np.array([5, 4, 3, 2, 1], dtype=f_dtype)
+ g = gradient(f)
+ assert_array_equal(g, [-1]*len(f))
+
+ @pytest.mark.parametrize('f_dtype', [np.int8, np.int16,
+ np.int32, np.int64])
+ def test_f_signed_int_big_jump(self, f_dtype):
+ maxint = np.iinfo(f_dtype).max
+ x = np.array([1, 3])
+ f = np.array([-1, maxint], dtype=f_dtype)
+ dfdx = gradient(f, x)
+ assert_array_equal(dfdx, [(maxint + 1) // 2]*2)
+
+ @pytest.mark.parametrize('x_dtype', [np.uint8, np.uint16,
+ np.uint32, np.uint64])
+ def test_x_decreasing_unsigned(self, x_dtype):
+ x = np.array([3, 2, 1], dtype=x_dtype)
+ f = np.array([0, 2, 4])
+ dfdx = gradient(f, x)
+ assert_array_equal(dfdx, [-2]*len(x))
+
+ @pytest.mark.parametrize('x_dtype', [np.int8, np.int16,
+ np.int32, np.int64])
+ def test_x_signed_int_big_jump(self, x_dtype):
+ minint = np.iinfo(x_dtype).min
+ maxint = np.iinfo(x_dtype).max
+ x = np.array([-1, maxint], dtype=x_dtype)
+ f = np.array([minint // 2, 0])
+ dfdx = gradient(f, x)
+ assert_array_equal(dfdx, [0.5, 0.5])
+
+
+class TestAngle:
def test_basic(self):
x = [1 + 3j, np.sqrt(2) / 2.0 + 1j * np.sqrt(2) / 2,
@@ -1111,7 +1159,7 @@ class TestAngle(object):
assert_equal(actual, expected)
-class TestTrimZeros(object):
+class TestTrimZeros:
"""
Only testing for integer splits.
@@ -1134,7 +1182,7 @@ class TestTrimZeros(object):
assert_array_equal(res, np.array([1, 0, 2, 3, 0, 4]))
-class TestExtins(object):
+class TestExtins:
def test_basic(self):
a = np.array([1, 3, 2, 1, 2, 3, 3])
@@ -1173,7 +1221,7 @@ class TestExtins(object):
assert_array_equal(a, ac)
-class TestVectorize(object):
+class TestVectorize:
def test_simple(self):
def addsubtract(a, b):
@@ -1505,8 +1553,8 @@ class TestVectorize(object):
f(x)
-class TestLeaks(object):
- class A(object):
+class TestLeaks:
+ class A:
iters = 20
def bound(self, *args):
@@ -1537,18 +1585,15 @@ class TestLeaks(object):
a.f = np.frompyfunc(getattr(a, name), 1, 1)
out = a.f(np.arange(10))
a = None
- if PY2:
- assert_equal(sys.getrefcount(A_func), refcount)
- else:
- # A.func is part of a reference cycle if incr is non-zero
- assert_equal(sys.getrefcount(A_func), refcount + incr)
+ # A.func is part of a reference cycle if incr is non-zero
+ assert_equal(sys.getrefcount(A_func), refcount + incr)
for i in range(5):
gc.collect()
assert_equal(sys.getrefcount(A_func), refcount)
finally:
gc.enable()
-class TestDigitize(object):
+class TestDigitize:
def test_forward(self):
x = np.arange(-6, 5)
@@ -1633,7 +1678,7 @@ class TestDigitize(object):
assert_equal(np.digitize(x, [x + 1, x - 1]), 1)
-class TestUnwrap(object):
+class TestUnwrap:
def test_simple(self):
# check that unwrap removes jumps greater that 2*pi
@@ -1642,7 +1687,7 @@ class TestUnwrap(object):
assert_(np.all(diff(unwrap(rand(10) * 100)) < np.pi))
-class TestFilterwindows(object):
+class TestFilterwindows:
def test_hanning(self):
# check symmetry
@@ -1673,7 +1718,7 @@ class TestFilterwindows(object):
assert_almost_equal(np.sum(w, axis=0), 3.7800, 4)
-class TestTrapz(object):
+class TestTrapz:
def test_simple(self):
x = np.arange(-10, 10, .1)
@@ -1735,7 +1780,7 @@ class TestTrapz(object):
assert_almost_equal(trapz(y, xm), r)
-class TestSinc(object):
+class TestSinc:
def test_simple(self):
assert_(sinc(0) == 1)
@@ -1752,7 +1797,7 @@ class TestSinc(object):
assert_array_equal(y1, y3)
-class TestUnique(object):
+class TestUnique:
def test_simple(self):
x = np.array([4, 3, 2, 1, 1, 2, 3, 4, 0])
@@ -1764,7 +1809,7 @@ class TestUnique(object):
assert_(np.all(unique(x) == [1 + 1j, 1 + 10j, 5 + 6j, 10]))
-class TestCheckFinite(object):
+class TestCheckFinite:
def test_simple(self):
a = [1, 2, 3]
@@ -1781,7 +1826,7 @@ class TestCheckFinite(object):
assert_(a.dtype == np.float64)
-class TestCorrCoef(object):
+class TestCorrCoef:
A = np.array(
[[0.15391142, 0.18045767, 0.14197213],
[0.70461506, 0.96474128, 0.27906989],
@@ -1866,14 +1911,14 @@ class TestCorrCoef(object):
assert_(np.all(np.abs(c) <= 1.0))
-class TestCov(object):
+class TestCov:
x1 = np.array([[0, 2], [1, 1], [2, 0]]).T
res1 = np.array([[1., -1.], [-1., 1.]])
x2 = np.array([0.0, 1.0, 2.0], ndmin=2)
frequencies = np.array([1, 4, 1])
x2_repeats = np.array([[0.0], [1.0], [1.0], [1.0], [1.0], [2.0]]).T
res2 = np.array([[0.4, -0.4], [-0.4, 0.4]])
- unit_frequencies = np.ones(3, dtype=np.integer)
+ unit_frequencies = np.ones(3, dtype=np.int_)
weights = np.array([1.0, 4.0, 1.0])
res3 = np.array([[2. / 3., -2. / 3.], [-2. / 3., 2. / 3.]])
unit_weights = np.ones(3)
@@ -1926,11 +1971,11 @@ class TestCov(object):
self.res1)
nonint = self.frequencies + 0.5
assert_raises(TypeError, cov, self.x1, fweights=nonint)
- f = np.ones((2, 3), dtype=np.integer)
+ f = np.ones((2, 3), dtype=np.int_)
assert_raises(RuntimeError, cov, self.x1, fweights=f)
- f = np.ones(2, dtype=np.integer)
+ f = np.ones(2, dtype=np.int_)
assert_raises(RuntimeError, cov, self.x1, fweights=f)
- f = -1 * np.ones(3, dtype=np.integer)
+ f = -1 * np.ones(3, dtype=np.int_)
assert_raises(ValueError, cov, self.x1, fweights=f)
def test_aweights(self):
@@ -1966,7 +2011,7 @@ class TestCov(object):
self.res1)
-class Test_I0(object):
+class Test_I0:
def test_simple(self):
assert_almost_equal(
@@ -2012,7 +2057,7 @@ class Test_I0(object):
assert_array_equal(exp, res)
-class TestKaiser(object):
+class TestKaiser:
def test_simple(self):
assert_(np.isfinite(kaiser(1, 1.0)))
@@ -2031,7 +2076,7 @@ class TestKaiser(object):
kaiser(3, 4)
-class TestMsort(object):
+class TestMsort:
def test_simple(self):
A = np.array([[0.44567325, 0.79115165, 0.54900530],
@@ -2044,7 +2089,7 @@ class TestMsort(object):
[0.64864341, 0.79115165, 0.96098397]]))
-class TestMeshgrid(object):
+class TestMeshgrid:
def test_simple(self):
[X, Y] = meshgrid([1, 2, 3], [4, 5, 6, 7])
@@ -2133,7 +2178,7 @@ class TestMeshgrid(object):
assert_equal(x[1, :], X)
-class TestPiecewise(object):
+class TestPiecewise:
def test_simple(self):
# Condition is single bool list
@@ -2225,7 +2270,7 @@ class TestPiecewise(object):
[3., 3., 1.]]))
-class TestBincount(object):
+class TestBincount:
def test_simple(self):
y = np.bincount(np.arange(4))
@@ -2312,7 +2357,7 @@ class TestBincount(object):
assert_equal(sys.getrefcount(np.dtype(np.double)), double_refcount)
-class TestInterp(object):
+class TestInterp:
def test_exceptions(self):
assert_raises(ValueError, interp, 0, [], [])
@@ -2511,7 +2556,7 @@ def compare_results(res, desired):
assert_array_equal(res[i], desired[i])
-class TestPercentile(object):
+class TestPercentile:
def test_basic(self):
x = np.arange(8) * 0.5
@@ -2523,7 +2568,7 @@ class TestPercentile(object):
assert_equal(np.percentile(x, 0, interpolation='nearest'), np.nan)
def test_fraction(self):
- x = [Fraction(i, 2) for i in np.arange(8)]
+ x = [Fraction(i, 2) for i in range(8)]
p = np.percentile(x, Fraction(0))
assert_equal(p, Fraction(0))
@@ -2932,7 +2977,7 @@ class TestPercentile(object):
a, [0.3, 0.6], (0, 2), interpolation='nearest'), b)
-class TestQuantile(object):
+class TestQuantile:
# most of this is already tested by TestPercentile
def test_basic(self):
@@ -2941,9 +2986,19 @@ class TestQuantile(object):
assert_equal(np.quantile(x, 1), 3.5)
assert_equal(np.quantile(x, 0.5), 1.75)
+ def test_correct_quantile_value(self):
+ a = np.array([True])
+ tf_quant = np.quantile(True, False)
+ assert_equal(tf_quant, a[0])
+ assert_equal(type(tf_quant), a.dtype)
+ a = np.array([False, True, True])
+ quant_res = np.quantile(a, a)
+ assert_array_equal(quant_res, a)
+ assert_equal(a.dtype, quant_res.dtype)
+
def test_fraction(self):
# fractional input, integral quantile
- x = [Fraction(i, 2) for i in np.arange(8)]
+ x = [Fraction(i, 2) for i in range(8)]
q = np.quantile(x, 0)
assert_equal(q, 0)
@@ -2974,7 +3029,7 @@ class TestQuantile(object):
assert_array_equal(p, p0)
-class TestMedian(object):
+class TestMedian:
def test_basic(self):
a0 = np.array(1)
@@ -3213,7 +3268,7 @@ class TestMedian(object):
(1, 1, 7, 1))
-class TestAdd_newdoc_ufunc(object):
+class TestAdd_newdoc_ufunc:
def test_ufunc_arg(self):
assert_raises(TypeError, add_newdoc_ufunc, 2, "blah")
@@ -3223,7 +3278,7 @@ class TestAdd_newdoc_ufunc(object):
assert_raises(TypeError, add_newdoc_ufunc, np.add, 3)
-class TestAdd_newdoc(object):
+class TestAdd_newdoc:
@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO")
@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc")
@@ -3234,7 +3289,7 @@ class TestAdd_newdoc(object):
assert_(len(np.core.ufunc.identity.__doc__) > 300)
assert_(len(np.lib.index_tricks.mgrid.__doc__) > 300)
-class TestSortComplex(object):
+class TestSortComplex:
@pytest.mark.parametrize("type_in, type_out", [
('l', 'D'),
diff --git a/numpy/lib/tests/test_histograms.py b/numpy/lib/tests/test_histograms.py
index 4895a722c..fc16b7396 100644
--- a/numpy/lib/tests/test_histograms.py
+++ b/numpy/lib/tests/test_histograms.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import numpy as np
from numpy.lib.histograms import histogram, histogramdd, histogram_bin_edges
@@ -8,9 +6,10 @@ from numpy.testing import (
assert_array_almost_equal, assert_raises, assert_allclose,
assert_array_max_ulp, assert_raises_regex, suppress_warnings,
)
+import pytest
-class TestHistogram(object):
+class TestHistogram:
def setup(self):
pass
@@ -82,7 +81,7 @@ class TestHistogram(object):
a, b = histogram(v, bins, density=False)
assert_array_equal(a, [1, 2, 3, 4])
- # Variale bin widths are especially useful to deal with
+ # Variable bin widths are especially useful to deal with
# infinities.
v = np.arange(10)
bins = [0, 1, 3, 6, np.inf]
@@ -423,7 +422,7 @@ class TestHistogram(object):
assert_array_equal(edges, e)
-class TestHistogramOptimBinNums(object):
+class TestHistogramOptimBinNums:
"""
Provide test coverage when using provided estimators for optimal number of
bins
@@ -591,6 +590,16 @@ class TestHistogramOptimBinNums(object):
msg += " with datasize of {0}".format(testlen)
assert_equal(len(a), numbins, err_msg=msg)
+ @pytest.mark.parametrize("bins", ['auto', 'fd', 'doane', 'scott',
+ 'stone', 'rice', 'sturges'])
+ def test_signed_integer_data(self, bins):
+ # Regression test for gh-14379.
+ a = np.array([-2, 0, 127], dtype=np.int8)
+ hist, edges = np.histogram(a, bins=bins)
+ hist32, edges32 = np.histogram(a.astype(np.int32), bins=bins)
+ assert_array_equal(hist, hist32)
+ assert_array_equal(edges, edges32)
+
def test_simple_weighted(self):
"""
Check that weighted data raises a TypeError
@@ -601,7 +610,7 @@ class TestHistogramOptimBinNums(object):
estimator, weights=[1, 2, 3])
-class TestHistogramdd(object):
+class TestHistogramdd:
def test_simple(self):
x = np.array([[-.5, .5, 1.5], [-.5, 1.5, 2.5], [-.5, 2.5, .5],
diff --git a/numpy/lib/tests/test_index_tricks.py b/numpy/lib/tests/test_index_tricks.py
index dbe445c2c..905165a99 100644
--- a/numpy/lib/tests/test_index_tricks.py
+++ b/numpy/lib/tests/test_index_tricks.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import pytest
import numpy as np
@@ -14,7 +12,7 @@ from numpy.lib.index_tricks import (
)
-class TestRavelUnravelIndex(object):
+class TestRavelUnravelIndex:
def test_basic(self):
assert_equal(np.unravel_index(2, (2, 2)), (1, 0))
@@ -194,7 +192,7 @@ class TestRavelUnravelIndex(object):
with assert_raises(ValueError):
np.unravel_index([1], (2, 1, 0))
-class TestGrid(object):
+class TestGrid:
def test_basic(self):
a = mgrid[-1:1:10j]
b = mgrid[-1:1:0.1]
@@ -252,7 +250,7 @@ class TestGrid(object):
assert_equal(grid_small.size, expected[1])
-class TestConcatenator(object):
+class TestConcatenator:
def test_1d(self):
assert_array_equal(r_[1, 2, 3, 4, 5, 6], np.array([1, 2, 3, 4, 5, 6]))
b = np.ones(5)
@@ -290,14 +288,14 @@ class TestConcatenator(object):
assert_equal(r_[np.array(0), [1, 2, 3]], [0, 1, 2, 3])
-class TestNdenumerate(object):
+class TestNdenumerate:
def test_basic(self):
a = np.array([[1, 2], [3, 4]])
assert_equal(list(ndenumerate(a)),
[((0, 0), 1), ((0, 1), 2), ((1, 0), 3), ((1, 1), 4)])
-class TestIndexExpression(object):
+class TestIndexExpression:
def test_regression_1(self):
# ticket #1196
a = np.arange(2)
@@ -311,7 +309,7 @@ class TestIndexExpression(object):
assert_equal(a[:, :3, [1, 2]], a[s_[:, :3, [1, 2]]])
-class TestIx_(object):
+class TestIx_:
def test_regression_1(self):
# Test empty untyped inputs create outputs of indexing type, gh-5804
a, = np.ix_(range(0))
@@ -358,7 +356,7 @@ def test_c_():
assert_equal(a, [[1, 2, 3, 0, 0, 4, 5, 6]])
-class TestFillDiagonal(object):
+class TestFillDiagonal:
def test_basic(self):
a = np.zeros((3, 3), int)
fill_diagonal(a, 5)
@@ -457,7 +455,7 @@ def test_diag_indices():
)
-class TestDiagIndicesFrom(object):
+class TestDiagIndicesFrom:
def test_diag_indices_from(self):
x = np.random.random((4, 4))
diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py
index 1181fe986..8ce20a116 100644
--- a/numpy/lib/tests/test_io.py
+++ b/numpy/lib/tests/test_io.py
@@ -1,6 +1,5 @@
-from __future__ import division, absolute_import, print_function
-
import sys
+import gc
import gzip
import os
import threading
@@ -9,21 +8,24 @@ import warnings
import io
import re
import pytest
+from pathlib import Path
from tempfile import NamedTemporaryFile
from io import BytesIO, StringIO
from datetime import datetime
import locale
+from multiprocessing import Process
import numpy as np
import numpy.ma as ma
from numpy.lib._iotools import ConverterError, ConversionWarning
-from numpy.compat import asbytes, bytes, Path
+from numpy.compat import asbytes, bytes
from numpy.ma.testutils import assert_equal
from numpy.testing import (
assert_warns, assert_, assert_raises_regex, assert_raises,
assert_allclose, assert_array_equal, temppath, tempdir, IS_PYPY,
HAS_REFCOUNT, suppress_warnings, assert_no_gc_cycles, assert_no_warnings
)
+from numpy.testing._private.utils import requires_memory
class TextIO(BytesIO):
@@ -45,7 +47,6 @@ class TextIO(BytesIO):
BytesIO.writelines(self, [asbytes(s) for s in lines])
-MAJVER, MINVER = sys.version_info[:2]
IS_64BIT = sys.maxsize > 2**32
try:
import bz2
@@ -70,7 +71,7 @@ def strptime(s, fmt=None):
return datetime(*time.strptime(s, fmt)[:3])
-class RoundtripTest(object):
+class RoundtripTest:
def roundtrip(self, save_func, *args, **kwargs):
"""
save_func : callable
@@ -277,8 +278,6 @@ class TestSavezLoad(RoundtripTest):
fp.seek(0)
assert_(not fp.closed)
- #FIXME: Is this still true?
- @pytest.mark.skipif(IS_PYPY, reason="Missing context manager on PyPy")
def test_closing_fid(self):
# Test that issue #1517 (too many opened files) remains closed
# It might be a "weak" test since failed to get triggered on
@@ -291,17 +290,18 @@ class TestSavezLoad(RoundtripTest):
# numpy npz file returned by np.load when their reference count
# goes to zero. Python 3 running in debug mode raises a
# ResourceWarning when file closing is left to the garbage
- # collector, so we catch the warnings. Because ResourceWarning
- # is unknown in Python < 3.x, we take the easy way out and
- # catch all warnings.
+ # collector, so we catch the warnings.
with suppress_warnings() as sup:
- sup.filter(Warning) # TODO: specify exact message
+ sup.filter(ResourceWarning) # TODO: specify exact message
for i in range(1, 1025):
try:
np.load(tmp)["data"]
except Exception as e:
msg = "Failed to load data from a file: %s" % e
raise AssertionError(msg)
+ finally:
+ if IS_PYPY:
+ gc.collect()
def test_closing_zipfile_after_load(self):
# Check that zipfile owns file and can close it. This needs to
@@ -317,7 +317,7 @@ class TestSavezLoad(RoundtripTest):
assert_(fp.closed)
-class TestSaveTxt(object):
+class TestSaveTxt:
def test_array(self):
a = np.array([[1, 2], [3, 4]], float)
fmt = "%.18e"
@@ -364,7 +364,6 @@ class TestSaveTxt(object):
c.seek(0)
assert_equal(c.readlines(), [b'1 3\n', b'4 6\n'])
- @pytest.mark.skipif(Path is None, reason="No pathlib.Path")
def test_multifield_view(self):
a = np.ones(1, dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'f4')])
v = a[['x', 'z']]
@@ -518,7 +517,7 @@ class TestSaveTxt(object):
def test_unicode(self):
utf8 = b'\xcf\x96'.decode('UTF-8')
- a = np.array([utf8], dtype=np.unicode)
+ a = np.array([utf8], dtype=np.unicode_)
with tempdir() as tmpdir:
# set encoding as on windows it may not be unicode even on py3
np.savetxt(os.path.join(tmpdir, 'test.csv'), a, fmt=['%s'],
@@ -526,26 +525,24 @@ class TestSaveTxt(object):
def test_unicode_roundtrip(self):
utf8 = b'\xcf\x96'.decode('UTF-8')
- a = np.array([utf8], dtype=np.unicode)
+ a = np.array([utf8], dtype=np.unicode_)
# our gz wrapper support encoding
suffixes = ['', '.gz']
- # stdlib 2 versions do not support encoding
- if MAJVER > 2:
- if HAS_BZ2:
- suffixes.append('.bz2')
- if HAS_LZMA:
- suffixes.extend(['.xz', '.lzma'])
+ if HAS_BZ2:
+ suffixes.append('.bz2')
+ if HAS_LZMA:
+ suffixes.extend(['.xz', '.lzma'])
with tempdir() as tmpdir:
for suffix in suffixes:
np.savetxt(os.path.join(tmpdir, 'test.csv' + suffix), a,
fmt=['%s'], encoding='UTF-16-LE')
b = np.loadtxt(os.path.join(tmpdir, 'test.csv' + suffix),
- encoding='UTF-16-LE', dtype=np.unicode)
+ encoding='UTF-16-LE', dtype=np.unicode_)
assert_array_equal(a, b)
def test_unicode_bytestream(self):
utf8 = b'\xcf\x96'.decode('UTF-8')
- a = np.array([utf8], dtype=np.unicode)
+ a = np.array([utf8], dtype=np.unicode_)
s = BytesIO()
np.savetxt(s, a, fmt=['%s'], encoding='UTF-8')
s.seek(0)
@@ -553,7 +550,7 @@ class TestSaveTxt(object):
def test_unicode_stringstream(self):
utf8 = b'\xcf\x96'.decode('UTF-8')
- a = np.array([utf8], dtype=np.unicode)
+ a = np.array([utf8], dtype=np.unicode_)
s = StringIO()
np.savetxt(s, a, fmt=['%s'], encoding='UTF-8')
s.seek(0)
@@ -572,22 +569,23 @@ class TestSaveTxt(object):
else:
assert_equal(s.read(), b"%f\n" % 1.)
- @pytest.mark.skipif(sys.platform=='win32',
- reason="large files cause problems")
+ @pytest.mark.skipif(sys.platform=='win32', reason="files>4GB may not work")
@pytest.mark.slow
+ @requires_memory(free_bytes=7e9)
def test_large_zip(self):
- # The test takes at least 6GB of memory, writes a file larger than 4GB
- try:
- a = 'a' * 6 * 1024 * 1024 * 1024
- del a
- except (MemoryError, OverflowError):
- pytest.skip("Cannot allocate enough memory for test")
- test_data = np.asarray([np.random.rand(np.random.randint(50,100),4)
- for i in range(800000)])
- with tempdir() as tmpdir:
- np.savez(os.path.join(tmpdir, 'test.npz'), test_data=test_data)
-
-class LoadTxtBase(object):
+ def check_large_zip():
+ # The test takes at least 6GB of memory, writes a file larger than 4GB
+ test_data = np.asarray([np.random.rand(np.random.randint(50,100),4)
+ for i in range(800000)], dtype=object)
+ with tempdir() as tmpdir:
+ np.savez(os.path.join(tmpdir, 'test.npz'), test_data=test_data)
+ # run in a subprocess to ensure memory is released on PyPy, see gh-15775
+ p = Process(target=check_large_zip)
+ p.start()
+ p.join()
+ assert p.exitcode == 0
+
+class LoadTxtBase:
def check_compressed(self, fopen, suffixes):
# Test that we can load data from a compressed file
wanted = np.arange(6).reshape((2, 3))
@@ -604,18 +602,14 @@ class LoadTxtBase(object):
res = self.loadfunc(f)
assert_array_equal(res, wanted)
- # Python2 .open does not support encoding
- @pytest.mark.skipif(MAJVER == 2, reason="Needs Python version >= 3")
def test_compressed_gzip(self):
self.check_compressed(gzip.open, ('.gz',))
@pytest.mark.skipif(not HAS_BZ2, reason="Needs bz2")
- @pytest.mark.skipif(MAJVER == 2, reason="Needs Python version >= 3")
def test_compressed_bz2(self):
self.check_compressed(bz2.open, ('.bz2',))
@pytest.mark.skipif(not HAS_LZMA, reason="Needs lzma")
- @pytest.mark.skipif(MAJVER == 2, reason="Needs Python version >= 3")
def test_compressed_lzma(self):
self.check_compressed(lzma.open, ('.xz', '.lzma'))
@@ -632,12 +626,12 @@ class LoadTxtBase(object):
with temppath() as path:
with open(path, "wb") as f:
f.write(nonascii.encode("UTF-16"))
- x = self.loadfunc(path, encoding="UTF-16", dtype=np.unicode)
+ x = self.loadfunc(path, encoding="UTF-16", dtype=np.unicode_)
assert_array_equal(x, nonascii)
def test_binary_decode(self):
utf16 = b'\xff\xfeh\x04 \x00i\x04 \x00j\x04'
- v = self.loadfunc(BytesIO(utf16), dtype=np.unicode, encoding='UTF-16')
+ v = self.loadfunc(BytesIO(utf16), dtype=np.unicode_, encoding='UTF-16')
assert_array_equal(v, np.array(utf16.decode('UTF-16').split()))
def test_converters_decode(self):
@@ -645,7 +639,7 @@ class LoadTxtBase(object):
c = TextIO()
c.write(b'\xcf\x96')
c.seek(0)
- x = self.loadfunc(c, dtype=np.unicode,
+ x = self.loadfunc(c, dtype=np.unicode_,
converters={0: lambda x: x.decode('UTF-8')})
a = np.array([b'\xcf\x96'.decode('UTF-8')])
assert_array_equal(x, a)
@@ -656,7 +650,7 @@ class LoadTxtBase(object):
with temppath() as path:
with io.open(path, 'wt', encoding='UTF-8') as f:
f.write(utf8)
- x = self.loadfunc(path, dtype=np.unicode,
+ x = self.loadfunc(path, dtype=np.unicode_,
converters={0: lambda x: x + 't'},
encoding='UTF-8')
a = np.array([utf8 + 't'])
@@ -829,7 +823,7 @@ class TestLoadTxt(LoadTxtBase):
assert_array_equal(x, a[:, 1])
# Testing with some crazy custom integer type
- class CrazyInt(object):
+ class CrazyInt:
def __index__(self):
return 1
@@ -1104,7 +1098,7 @@ class TestLoadTxt(LoadTxtBase):
with open(path, "wb") as f:
f.write(butf8)
with open(path, "rb") as f:
- x = np.loadtxt(f, encoding="UTF-8", dtype=np.unicode)
+ x = np.loadtxt(f, encoding="UTF-8", dtype=np.unicode_)
assert_array_equal(x, sutf8)
# test broken latin1 conversion people now rely on
with open(path, "rb") as f:
@@ -1161,7 +1155,7 @@ class TestLoadTxt(LoadTxtBase):
a = np.array([[1, 2, 3, 5], [4, 5, 7, 8], [2, 1, 4, 5]], int)
assert_array_equal(x, a)
-class Testfromregex(object):
+class Testfromregex:
def test_record(self):
c = TextIO()
c.write('1.312 foo\n1.534 bar\n4.444 qux')
@@ -1587,7 +1581,7 @@ M 33 21.99
with open(path, 'wb') as f:
f.write(b'skip,skip,2001-01-01' + utf8 + b',1.0,skip')
test = np.genfromtxt(path, delimiter=",", names=None, dtype=float,
- usecols=(2, 3), converters={2: np.unicode},
+ usecols=(2, 3), converters={2: np.compat.unicode},
encoding='UTF-8')
control = np.array([('2001-01-01' + utf8.decode('UTF-8'), 1.)],
dtype=[('', '|U11'), ('', float)])
@@ -2126,7 +2120,7 @@ M 33 21.99
ctl = np.array([
["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"],
["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"]],
- dtype=np.unicode)
+ dtype=np.unicode_)
assert_array_equal(test, ctl)
# test a mixed dtype
@@ -2169,7 +2163,7 @@ M 33 21.99
["norm1", "norm2", "norm3"],
["norm1", latin1, "norm3"],
["test1", "testNonethe" + utf8, "test3"]],
- dtype=np.unicode)
+ dtype=np.unicode_)
assert_array_equal(test, ctl)
def test_recfromtxt(self):
@@ -2344,15 +2338,14 @@ M 33 21.99
assert_(test.dtype['f0'] == float)
assert_(test.dtype['f1'] == np.int64)
- assert_(test.dtype['f2'] == np.integer)
+ assert_(test.dtype['f2'] == np.int_)
assert_allclose(test['f0'], 73786976294838206464.)
assert_equal(test['f1'], 17179869184)
assert_equal(test['f2'], 1024)
-@pytest.mark.skipif(Path is None, reason="No pathlib.Path")
-class TestPathUsage(object):
+class TestPathUsage:
# Test that pathlib.Path can be used
def test_loadtxt(self):
with temppath(suffix='.txt') as path:
@@ -2485,7 +2478,7 @@ def test_gzip_load():
# These next two classes encode the minimal API needed to save()/load() arrays.
# The `test_ducktyping` ensures they work correctly
-class JustWriter(object):
+class JustWriter:
def __init__(self, base):
self.base = base
@@ -2495,7 +2488,7 @@ class JustWriter(object):
def flush(self):
return self.base.flush()
-class JustReader(object):
+class JustReader:
def __init__(self, base):
self.base = base
diff --git a/numpy/lib/tests/test_mixins.py b/numpy/lib/tests/test_mixins.py
index 3dd5346b6..632058763 100644
--- a/numpy/lib/tests/test_mixins.py
+++ b/numpy/lib/tests/test_mixins.py
@@ -1,16 +1,10 @@
-from __future__ import division, absolute_import, print_function
-
import numbers
import operator
-import sys
import numpy as np
from numpy.testing import assert_, assert_equal, assert_raises
-PY2 = sys.version_info.major < 3
-
-
# NOTE: This class should be kept as an exact copy of the example from the
# docstring for NDArrayOperatorsMixin.
@@ -86,7 +80,6 @@ _ALL_BINARY_OPERATORS = [
operator.mul,
operator.truediv,
operator.floordiv,
- # TODO: test div on Python 2, only
operator.mod,
divmod,
pow,
@@ -98,7 +91,7 @@ _ALL_BINARY_OPERATORS = [
]
-class TestNDArrayOperatorsMixin(object):
+class TestNDArrayOperatorsMixin:
def test_array_like_add(self):
@@ -128,7 +121,7 @@ class TestNDArrayOperatorsMixin(object):
def test_opt_out(self):
- class OptOut(object):
+ class OptOut:
"""Object that opts out of __array_ufunc__."""
__array_ufunc__ = None
@@ -204,11 +197,10 @@ class TestNDArrayOperatorsMixin(object):
array_like = ArrayLike(array)
expected = ArrayLike(np.float64(5))
_assert_equal_type_and_value(expected, np.matmul(array_like, array))
- if not PY2:
- _assert_equal_type_and_value(
- expected, operator.matmul(array_like, array))
- _assert_equal_type_and_value(
- expected, operator.matmul(array, array_like))
+ _assert_equal_type_and_value(
+ expected, operator.matmul(array_like, array))
+ _assert_equal_type_and_value(
+ expected, operator.matmul(array, array_like))
def test_ufunc_at(self):
array = ArrayLike(np.array([1, 2, 3, 4]))
diff --git a/numpy/lib/tests/test_nanfunctions.py b/numpy/lib/tests/test_nanfunctions.py
index b7261c63f..db563e30c 100644
--- a/numpy/lib/tests/test_nanfunctions.py
+++ b/numpy/lib/tests/test_nanfunctions.py
@@ -1,10 +1,8 @@
-from __future__ import division, absolute_import, print_function
-
import warnings
import pytest
import numpy as np
-from numpy.lib.nanfunctions import _nan_mask
+from numpy.lib.nanfunctions import _nan_mask, _replace_nan
from numpy.testing import (
assert_, assert_equal, assert_almost_equal, assert_no_warnings,
assert_raises, assert_array_equal, suppress_warnings
@@ -37,7 +35,7 @@ _ndat_zeros = np.array([[0.6244, 0.0, 0.2692, 0.0116, 0.0, 0.1170],
[0.1610, 0.0, 0.0, 0.1859, 0.3146, 0.0]])
-class TestNanFunctions_MinMax(object):
+class TestNanFunctions_MinMax:
nanfuncs = [np.nanmin, np.nanmax]
stdfuncs = [np.min, np.max]
@@ -171,7 +169,7 @@ class TestNanFunctions_MinMax(object):
assert_(issubclass(w[0].category, RuntimeWarning))
-class TestNanFunctions_ArgminArgmax(object):
+class TestNanFunctions_ArgminArgmax:
nanfuncs = [np.nanargmin, np.nanargmax]
@@ -233,7 +231,7 @@ class TestNanFunctions_ArgminArgmax(object):
assert_(res.shape == ())
-class TestNanFunctions_IntTypes(object):
+class TestNanFunctions_IntTypes:
int_types = (np.int8, np.int16, np.int32, np.int64, np.uint8,
np.uint16, np.uint32, np.uint64)
@@ -308,7 +306,7 @@ class TestNanFunctions_IntTypes(object):
assert_equal(np.nanstd(mat, ddof=1), tgt)
-class SharedNanFunctionsTestsMixin(object):
+class SharedNanFunctionsTestsMixin:
def test_mutation(self):
# Check that passed array is not modified.
ndat = _ndat.copy()
@@ -590,7 +588,7 @@ class TestNanFunctions_MeanVarStd(SharedNanFunctionsTestsMixin):
assert_(len(w) == 0)
-class TestNanFunctions_Median(object):
+class TestNanFunctions_Median:
def test_mutation(self):
# Check that passed array is not modified.
@@ -754,7 +752,7 @@ class TestNanFunctions_Median(object):
([np.nan] * i) + [-inf] * j)
-class TestNanFunctions_Percentile(object):
+class TestNanFunctions_Percentile:
def test_mutation(self):
# Check that passed array is not modified.
@@ -893,7 +891,7 @@ class TestNanFunctions_Percentile(object):
assert_equal(np.nanpercentile(megamat, perc, axis=(1, 2)).shape, (2, 3, 6))
-class TestNanFunctions_Quantile(object):
+class TestNanFunctions_Quantile:
# most of this is already tested by TestPercentile
def test_regression(self):
@@ -953,3 +951,30 @@ def test__nan_mask(arr, expected):
# for types that can't possibly contain NaN
if type(expected) is not np.ndarray:
assert actual is True
+
+
+def test__replace_nan():
+ """ Test that _replace_nan returns the original array if there are no
+ NaNs, not a copy.
+ """
+ for dtype in [np.bool, np.int32, np.int64]:
+ arr = np.array([0, 1], dtype=dtype)
+ result, mask = _replace_nan(arr, 0)
+ assert mask is None
+ # do not make a copy if there are no nans
+ assert result is arr
+
+ for dtype in [np.float32, np.float64]:
+ arr = np.array([0, 1], dtype=dtype)
+ result, mask = _replace_nan(arr, 2)
+ assert (mask == False).all()
+ # mask is not None, so we make a copy
+ assert result is not arr
+ assert_equal(result, arr)
+
+ arr_nan = np.array([0, 1, np.nan], dtype=dtype)
+ result_nan, mask_nan = _replace_nan(arr_nan, 2)
+ assert_equal(mask_nan, np.array([False, False, True]))
+ assert result_nan is not arr_nan
+ assert_equal(result_nan, np.array([0, 1, 2]))
+ assert np.isnan(arr_nan[-1])
diff --git a/numpy/lib/tests/test_packbits.py b/numpy/lib/tests/test_packbits.py
index 95a465c36..5b07f41c6 100644
--- a/numpy/lib/tests/test_packbits.py
+++ b/numpy/lib/tests/test_packbits.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import numpy as np
from numpy.testing import assert_array_equal, assert_equal, assert_raises
import pytest
diff --git a/numpy/lib/tests/test_polynomial.py b/numpy/lib/tests/test_polynomial.py
index 89759bd83..cd0b90dc4 100644
--- a/numpy/lib/tests/test_polynomial.py
+++ b/numpy/lib/tests/test_polynomial.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_almost_equal,
@@ -7,7 +5,7 @@ from numpy.testing import (
)
-class TestPolynomial(object):
+class TestPolynomial:
def test_poly1d_str_and_repr(self):
p = np.poly1d([1., 2, 3])
assert_equal(repr(p), 'poly1d([1., 2., 3.])')
diff --git a/numpy/lib/tests/test_recfunctions.py b/numpy/lib/tests/test_recfunctions.py
index fa5f4dec2..2f3c14df3 100644
--- a/numpy/lib/tests/test_recfunctions.py
+++ b/numpy/lib/tests/test_recfunctions.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import pytest
import numpy as np
@@ -19,7 +17,7 @@ zip_descr = np.lib.recfunctions._zip_descr
zip_dtype = np.lib.recfunctions._zip_dtype
-class TestRecFunctions(object):
+class TestRecFunctions:
# Misc tests
def setup(self):
@@ -348,7 +346,7 @@ class TestRecFunctions(object):
assert_equal(b[()], 3)
-class TestRecursiveFillFields(object):
+class TestRecursiveFillFields:
# Test recursive_fill_fields.
def test_simple_flexible(self):
# Test recursive_fill_fields on flexible-array
@@ -371,7 +369,7 @@ class TestRecursiveFillFields(object):
assert_equal(test, control)
-class TestMergeArrays(object):
+class TestMergeArrays:
# Test merge_arrays
def setup(self):
@@ -504,7 +502,7 @@ class TestMergeArrays(object):
assert_equal(test, control)
-class TestAppendFields(object):
+class TestAppendFields:
# Test append_fields
def setup(self):
@@ -558,7 +556,7 @@ class TestAppendFields(object):
assert_equal(test, control)
-class TestStackArrays(object):
+class TestStackArrays:
# Test stack_arrays
def setup(self):
x = np.array([1, 2, ])
@@ -729,7 +727,7 @@ class TestStackArrays(object):
assert_equal(res.mask, expected.mask)
-class TestJoinBy(object):
+class TestJoinBy:
def setup(self):
self.a = np.array(list(zip(np.arange(10), np.arange(50, 60),
np.arange(100, 110))),
@@ -772,7 +770,6 @@ class TestJoinBy(object):
def test_join_subdtype(self):
# tests the bug in https://stackoverflow.com/q/44769632/102441
- from numpy.lib import recfunctions as rfn
foo = np.array([(1,)],
dtype=[('key', int)])
bar = np.array([(1, np.array([1,2,3]))],
@@ -895,7 +892,7 @@ class TestJoinBy(object):
assert_equal(res.dtype, expected_dtype)
-class TestJoinBy2(object):
+class TestJoinBy2:
@classmethod
def setup(cls):
cls.a = np.array(list(zip(np.arange(10), np.arange(50, 60),
@@ -960,7 +957,7 @@ class TestJoinBy2(object):
assert_equal(test.dtype, control.dtype)
assert_equal(test, control)
-class TestAppendFieldsObj(object):
+class TestAppendFieldsObj:
"""
Test append_fields with arrays containing objects
"""
diff --git a/numpy/lib/tests/test_regression.py b/numpy/lib/tests/test_regression.py
index 4cd812f5d..55df2a675 100644
--- a/numpy/lib/tests/test_regression.py
+++ b/numpy/lib/tests/test_regression.py
@@ -1,17 +1,13 @@
-from __future__ import division, absolute_import, print_function
-
import os
-import sys
import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_array_almost_equal,
assert_raises, _assert_valid_refcount,
)
-from numpy.compat import unicode
-class TestRegression(object):
+class TestRegression:
def test_poly1d(self):
# Ticket #28
assert_equal(np.poly1d([1]) - np.poly1d([1, 0]),
@@ -183,7 +179,7 @@ class TestRegression(object):
# related to ticket #1405.
include_dirs = [np.get_include()]
for path in include_dirs:
- assert_(isinstance(path, (str, unicode)))
+ assert_(isinstance(path, str))
assert_(path != '')
def test_polyder_return_type(self):
@@ -208,10 +204,7 @@ class TestRegression(object):
def test_loadtxt_fields_subarrays(self):
# For ticket #1936
- if sys.version_info[0] >= 3:
- from io import StringIO
- else:
- from StringIO import StringIO
+ from io import StringIO
dt = [("a", 'u1', 2), ("b", 'u1', 2)]
x = np.loadtxt(StringIO("0 1 2 3"), dtype=dt)
diff --git a/numpy/lib/tests/test_shape_base.py b/numpy/lib/tests/test_shape_base.py
index 01ea028bb..fb7ba7874 100644
--- a/numpy/lib/tests/test_shape_base.py
+++ b/numpy/lib/tests/test_shape_base.py
@@ -1,7 +1,4 @@
-from __future__ import division, absolute_import, print_function
-
import numpy as np
-import warnings
import functools
import sys
import pytest
@@ -30,7 +27,7 @@ def _add_keepdims(func):
return wrapped
-class TestTakeAlongAxis(object):
+class TestTakeAlongAxis:
def test_argequivalent(self):
""" Test it translates from arg<func> to <func> """
from numpy.random import rand
@@ -82,7 +79,7 @@ class TestTakeAlongAxis(object):
assert_equal(actual.shape, (3, 2, 5))
-class TestPutAlongAxis(object):
+class TestPutAlongAxis:
def test_replace_max(self):
a_base = np.array([[10, 30, 20], [60, 40, 50]])
@@ -107,7 +104,7 @@ class TestPutAlongAxis(object):
assert_equal(take_along_axis(a, ai, axis=1), 20)
-class TestApplyAlongAxis(object):
+class TestApplyAlongAxis:
def test_simple(self):
a = np.ones((20, 10), 'd')
assert_array_equal(
@@ -273,14 +270,14 @@ class TestApplyAlongAxis(object):
assert_equal(type(actual[i]), type(expected[i]))
-class TestApplyOverAxes(object):
+class TestApplyOverAxes:
def test_simple(self):
a = np.arange(24).reshape(2, 3, 4)
aoa_a = apply_over_axes(np.sum, a, [0, 2])
assert_array_equal(aoa_a, np.array([[[60], [92], [124]]]))
-class TestExpandDims(object):
+class TestExpandDims:
def test_functionality(self):
s = (2, 3, 4, 5)
a = np.empty(s)
@@ -289,14 +286,26 @@ class TestExpandDims(object):
assert_(b.shape[axis] == 1)
assert_(np.squeeze(b).shape == s)
- def test_deprecations(self):
- # 2017-05-17, 1.13.0
+ def test_axis_tuple(self):
+ a = np.empty((3, 3, 3))
+ assert np.expand_dims(a, axis=(0, 1, 2)).shape == (1, 1, 1, 3, 3, 3)
+ assert np.expand_dims(a, axis=(0, -1, -2)).shape == (1, 3, 3, 3, 1, 1)
+ assert np.expand_dims(a, axis=(0, 3, 5)).shape == (1, 3, 3, 1, 3, 1)
+ assert np.expand_dims(a, axis=(0, -3, -5)).shape == (1, 1, 3, 1, 3, 3)
+
+ def test_axis_out_of_range(self):
s = (2, 3, 4, 5)
a = np.empty(s)
- with warnings.catch_warnings():
- warnings.simplefilter("always")
- assert_warns(DeprecationWarning, expand_dims, a, -6)
- assert_warns(DeprecationWarning, expand_dims, a, 5)
+ assert_raises(np.AxisError, expand_dims, a, -6)
+ assert_raises(np.AxisError, expand_dims, a, 5)
+
+ a = np.empty((3, 3, 3))
+ assert_raises(np.AxisError, expand_dims, a, (0, -6))
+ assert_raises(np.AxisError, expand_dims, a, (0, 5))
+
+ def test_repeated_axis(self):
+ a = np.empty((3, 3, 3))
+ assert_raises(ValueError, expand_dims, a, axis=(1, 1))
def test_subclasses(self):
a = np.arange(10).reshape((2, 5))
@@ -308,7 +317,7 @@ class TestExpandDims(object):
assert_equal(expanded.mask.shape, (2, 1, 5))
-class TestArraySplit(object):
+class TestArraySplit:
def test_integer_0_split(self):
a = np.arange(10)
assert_raises(ValueError, array_split, a, 0)
@@ -442,7 +451,7 @@ class TestArraySplit(object):
compare_results(res, desired)
-class TestSplit(object):
+class TestSplit:
# The split function is essentially the same as array_split,
# except that it test if splitting will result in an
# equal split. Only test for this case.
@@ -458,7 +467,7 @@ class TestSplit(object):
assert_raises(ValueError, split, a, 3)
-class TestColumnStack(object):
+class TestColumnStack:
def test_non_iterable(self):
assert_raises(TypeError, column_stack, 1)
@@ -487,7 +496,7 @@ class TestColumnStack(object):
column_stack((np.arange(3) for _ in range(2)))
-class TestDstack(object):
+class TestDstack:
def test_non_iterable(self):
assert_raises(TypeError, dstack, 1)
@@ -526,7 +535,7 @@ class TestDstack(object):
# array_split has more comprehensive test of splitting.
# only do simple test on hsplit, vsplit, and dsplit
-class TestHsplit(object):
+class TestHsplit:
"""Only testing for integer splits.
"""
@@ -555,7 +564,7 @@ class TestHsplit(object):
compare_results(res, desired)
-class TestVsplit(object):
+class TestVsplit:
"""Only testing for integer splits.
"""
@@ -582,7 +591,7 @@ class TestVsplit(object):
compare_results(res, desired)
-class TestDsplit(object):
+class TestDsplit:
# Only testing for integer splits.
def test_non_iterable(self):
assert_raises(ValueError, dsplit, 1, 1)
@@ -615,7 +624,7 @@ class TestDsplit(object):
compare_results(res, desired)
-class TestSqueeze(object):
+class TestSqueeze:
def test_basic(self):
from numpy.random import rand
@@ -634,7 +643,7 @@ class TestSqueeze(object):
assert_equal(type(res), np.ndarray)
-class TestKron(object):
+class TestKron:
def test_return_type(self):
class myarray(np.ndarray):
__array_priority__ = 0.0
@@ -647,7 +656,7 @@ class TestKron(object):
assert_equal(type(kron(ma, a)), myarray)
-class TestTile(object):
+class TestTile:
def test_basic(self):
a = np.array([0, 1, 2])
b = [[1, 2], [3, 4]]
@@ -687,7 +696,7 @@ class TestTile(object):
assert_equal(large, klarge)
-class TestMayShareMemory(object):
+class TestMayShareMemory:
def test_basic(self):
d = np.ones((50, 60))
d2 = np.ones((30, 60, 6))
diff --git a/numpy/lib/tests/test_stride_tricks.py b/numpy/lib/tests/test_stride_tricks.py
index 85fcceedc..9d95eb9d0 100644
--- a/numpy/lib/tests/test_stride_tricks.py
+++ b/numpy/lib/tests/test_stride_tricks.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import numpy as np
from numpy.core._rational_tests import rational
from numpy.testing import (
@@ -65,8 +63,7 @@ def test_broadcast_kwargs():
x = np.arange(10)
y = np.arange(10)
- with assert_raises_regex(TypeError,
- r'broadcast_arrays\(\) got an unexpected keyword*'):
+ with assert_raises_regex(TypeError, 'got an unexpected keyword'):
broadcast_arrays(x, y, dtype='float64')
@@ -356,14 +353,12 @@ def as_strided_writeable():
class VerySimpleSubClass(np.ndarray):
def __new__(cls, *args, **kwargs):
- kwargs['subok'] = True
- return np.array(*args, **kwargs).view(cls)
+ return np.array(*args, subok=True, **kwargs).view(cls)
class SimpleSubClass(VerySimpleSubClass):
def __new__(cls, *args, **kwargs):
- kwargs['subok'] = True
- self = np.array(*args, **kwargs).view(cls)
+ self = np.array(*args, subok=True, **kwargs).view(cls)
self.info = 'simple'
return self
diff --git a/numpy/lib/tests/test_twodim_base.py b/numpy/lib/tests/test_twodim_base.py
index bb844e4bd..cce683bfe 100644
--- a/numpy/lib/tests/test_twodim_base.py
+++ b/numpy/lib/tests/test_twodim_base.py
@@ -1,8 +1,6 @@
"""Test functions for matrix module
"""
-from __future__ import division, absolute_import, print_function
-
from numpy.testing import (
assert_equal, assert_array_equal, assert_array_max_ulp,
assert_array_almost_equal, assert_raises, assert_
@@ -26,7 +24,7 @@ def get_mat(n):
return data
-class TestEye(object):
+class TestEye:
def test_basic(self):
assert_equal(eye(4),
array([[1, 0, 0, 0],
@@ -108,7 +106,7 @@ class TestEye(object):
assert mat_f.flags.f_contiguous
-class TestDiag(object):
+class TestDiag:
def test_vector(self):
vals = (100 * arange(5)).astype('l')
b = zeros((5, 5))
@@ -155,7 +153,7 @@ class TestDiag(object):
assert_raises(ValueError, diag, [[[1]]])
-class TestFliplr(object):
+class TestFliplr:
def test_basic(self):
assert_raises(ValueError, fliplr, ones(4))
a = get_mat(4)
@@ -168,7 +166,7 @@ class TestFliplr(object):
assert_equal(fliplr(a), b)
-class TestFlipud(object):
+class TestFlipud:
def test_basic(self):
a = get_mat(4)
b = a[::-1, :]
@@ -180,7 +178,7 @@ class TestFlipud(object):
assert_equal(flipud(a), b)
-class TestHistogram2d(object):
+class TestHistogram2d:
def test_simple(self):
x = array(
[0.41702200, 0.72032449, 1.1437481e-4, 0.302332573, 0.146755891])
@@ -298,7 +296,7 @@ class TestHistogram2d(object):
assert_(r, ((ShouldDispatch,), (xy, xy), dict(weights=s_d)))
-class TestTri(object):
+class TestTri:
def test_dtype(self):
out = array([[1, 0, 0],
[1, 1, 0],
@@ -436,7 +434,7 @@ def test_tril_indices():
[-10, -10, -10, -10, -10]]))
-class TestTriuIndices(object):
+class TestTriuIndices:
def test_triu_indices(self):
iu1 = triu_indices(4)
iu2 = triu_indices(4, k=2)
@@ -486,21 +484,21 @@ class TestTriuIndices(object):
[16, 17, 18, -1, -1]]))
-class TestTrilIndicesFrom(object):
+class TestTrilIndicesFrom:
def test_exceptions(self):
assert_raises(ValueError, tril_indices_from, np.ones((2,)))
assert_raises(ValueError, tril_indices_from, np.ones((2, 2, 2)))
# assert_raises(ValueError, tril_indices_from, np.ones((2, 3)))
-class TestTriuIndicesFrom(object):
+class TestTriuIndicesFrom:
def test_exceptions(self):
assert_raises(ValueError, triu_indices_from, np.ones((2,)))
assert_raises(ValueError, triu_indices_from, np.ones((2, 2, 2)))
# assert_raises(ValueError, triu_indices_from, np.ones((2, 3)))
-class TestVander(object):
+class TestVander:
def test_basic(self):
c = np.array([0, 1, -2, 3])
v = vander(c)
diff --git a/numpy/lib/tests/test_type_check.py b/numpy/lib/tests/test_type_check.py
index b3f114b92..3f4ca6309 100644
--- a/numpy/lib/tests/test_type_check.py
+++ b/numpy/lib/tests/test_type_check.py
@@ -1,7 +1,4 @@
-from __future__ import division, absolute_import, print_function
-
import numpy as np
-from numpy.compat import long
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_raises
)
@@ -15,7 +12,7 @@ def assert_all(x):
assert_(np.all(x), x)
-class TestCommonType(object):
+class TestCommonType:
def test_basic(self):
ai32 = np.array([[1, 2], [3, 4]], dtype=np.int32)
af16 = np.array([[1, 2], [3, 4]], dtype=np.float16)
@@ -31,7 +28,7 @@ class TestCommonType(object):
assert_(common_type(acd) == np.cdouble)
-class TestMintypecode(object):
+class TestMintypecode:
def test_default_1(self):
for itype in '1bcsuwil':
@@ -81,18 +78,17 @@ class TestMintypecode(object):
assert_equal(mintypecode('idD'), 'D')
-class TestIsscalar(object):
+class TestIsscalar:
def test_basic(self):
assert_(np.isscalar(3))
assert_(not np.isscalar([3]))
assert_(not np.isscalar((3,)))
assert_(np.isscalar(3j))
- assert_(np.isscalar(long(10)))
assert_(np.isscalar(4.0))
-class TestReal(object):
+class TestReal:
def test_real(self):
y = np.random.rand(10,)
@@ -123,7 +119,7 @@ class TestReal(object):
assert_(not isinstance(out, np.ndarray))
-class TestImag(object):
+class TestImag:
def test_real(self):
y = np.random.rand(10,)
@@ -154,7 +150,7 @@ class TestImag(object):
assert_(not isinstance(out, np.ndarray))
-class TestIscomplex(object):
+class TestIscomplex:
def test_fail(self):
z = np.array([-1, 0, 1])
@@ -167,7 +163,7 @@ class TestIscomplex(object):
assert_array_equal(res, [1, 0, 0])
-class TestIsreal(object):
+class TestIsreal:
def test_pass(self):
z = np.array([-1, 0, 1j])
@@ -180,7 +176,7 @@ class TestIsreal(object):
assert_array_equal(res, [0, 1, 1])
-class TestIscomplexobj(object):
+class TestIscomplexobj:
def test_basic(self):
z = np.array([-1, 0, 1])
@@ -209,7 +205,7 @@ class TestIscomplexobj(object):
# (pandas.core.dtypes)
class PdComplex(np.complex128):
pass
- class PdDtype(object):
+ class PdDtype:
name = 'category'
names = None
type = PdComplex
@@ -233,7 +229,7 @@ class TestIscomplexobj(object):
assert_(iscomplexobj(a))
-class TestIsrealobj(object):
+class TestIsrealobj:
def test_basic(self):
z = np.array([-1, 0, 1])
assert_(isrealobj(z))
@@ -241,7 +237,7 @@ class TestIsrealobj(object):
assert_(not isrealobj(z))
-class TestIsnan(object):
+class TestIsnan:
def test_goodvalues(self):
z = np.array((-1., 0., 1.))
@@ -271,7 +267,7 @@ class TestIsnan(object):
assert_all(np.isnan(np.array(0+0j)/0.) == 1)
-class TestIsfinite(object):
+class TestIsfinite:
# Fixme, wrong place, isfinite now ufunc
def test_goodvalues(self):
@@ -302,7 +298,7 @@ class TestIsfinite(object):
assert_all(np.isfinite(np.array(1+1j)/0.) == 0)
-class TestIsinf(object):
+class TestIsinf:
# Fixme, wrong place, isinf now ufunc
def test_goodvalues(self):
@@ -331,7 +327,7 @@ class TestIsinf(object):
assert_all(np.isinf(np.array((0.,))/0.) == 0)
-class TestIsposinf(object):
+class TestIsposinf:
def test_generic(self):
with np.errstate(divide='ignore', invalid='ignore'):
@@ -341,7 +337,7 @@ class TestIsposinf(object):
assert_(vals[2] == 1)
-class TestIsneginf(object):
+class TestIsneginf:
def test_generic(self):
with np.errstate(divide='ignore', invalid='ignore'):
@@ -351,7 +347,7 @@ class TestIsneginf(object):
assert_(vals[2] == 0)
-class TestNanToNum(object):
+class TestNanToNum:
def test_generic(self):
with np.errstate(divide='ignore', invalid='ignore'):
@@ -456,7 +452,7 @@ class TestNanToNum(object):
assert_equal(type(vals), np.ndarray)
-class TestRealIfClose(object):
+class TestRealIfClose:
def test_basic(self):
a = np.random.rand(10)
@@ -469,7 +465,7 @@ class TestRealIfClose(object):
assert_all(isrealobj(b))
-class TestArrayConversion(object):
+class TestArrayConversion:
def test_asfarray(self):
a = asfarray(np.array([1, 2, 3]))
diff --git a/numpy/lib/tests/test_ufunclike.py b/numpy/lib/tests/test_ufunclike.py
index 0f06876a1..c280b6969 100644
--- a/numpy/lib/tests/test_ufunclike.py
+++ b/numpy/lib/tests/test_ufunclike.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import numpy as np
import numpy.core as nx
import numpy.lib.ufunclike as ufl
@@ -8,7 +6,7 @@ from numpy.testing import (
)
-class TestUfunclike(object):
+class TestUfunclike:
def test_isposinf(self):
a = nx.array([nx.inf, -nx.inf, nx.nan, 0.0, 3.0, -3.0])
@@ -21,7 +19,7 @@ class TestUfunclike(object):
assert_equal(res, tgt)
assert_equal(out, tgt)
- a = a.astype(np.complex)
+ a = a.astype(np.complex_)
with assert_raises(TypeError):
ufl.isposinf(a)
@@ -36,7 +34,7 @@ class TestUfunclike(object):
assert_equal(res, tgt)
assert_equal(out, tgt)
- a = a.astype(np.complex)
+ a = a.astype(np.complex_)
with assert_raises(TypeError):
ufl.isneginf(a)
diff --git a/numpy/lib/tests/test_utils.py b/numpy/lib/tests/test_utils.py
index 9673a05fa..c96bf795a 100644
--- a/numpy/lib/tests/test_utils.py
+++ b/numpy/lib/tests/test_utils.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import inspect
import sys
import pytest
@@ -9,10 +7,7 @@ from numpy.testing import assert_, assert_equal, assert_raises_regex
from numpy.lib import deprecate
import numpy.lib.utils as utils
-if sys.version_info[0] >= 3:
- from io import StringIO
-else:
- from StringIO import StringIO
+from io import StringIO
@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO")
@@ -102,7 +97,7 @@ def test_safe_eval_nameconstant():
utils.safe_eval('None')
-class TestByteBounds(object):
+class TestByteBounds:
def test_byte_bounds(self):
# pointer difference matches size * itemsize
diff --git a/numpy/lib/twodim_base.py b/numpy/lib/twodim_base.py
index f45392188..320a24856 100644
--- a/numpy/lib/twodim_base.py
+++ b/numpy/lib/twodim_base.py
@@ -1,18 +1,16 @@
""" Basic functions for manipulating 2d arrays
"""
-from __future__ import division, absolute_import, print_function
-
import functools
from numpy.core.numeric import (
- absolute, asanyarray, arange, zeros, greater_equal, multiply, ones,
+ asanyarray, arange, zeros, greater_equal, multiply, ones,
asarray, where, int8, int16, int32, int64, empty, promote_types, diagonal,
nonzero
)
from numpy.core.overrides import set_module
from numpy.core import overrides
-from numpy.core import iinfo, transpose
+from numpy.core import iinfo
__all__ = [
diff --git a/numpy/lib/type_check.py b/numpy/lib/type_check.py
index 586824743..2a2982ab3 100644
--- a/numpy/lib/type_check.py
+++ b/numpy/lib/type_check.py
@@ -1,7 +1,6 @@
"""Automatically adapted for numpy Sep 19, 2005 by convertcode.py
"""
-from __future__ import division, absolute_import, print_function
import functools
import warnings
@@ -68,16 +67,14 @@ def mintypecode(typechars, typeset='GDFgdf', default='d'):
'G'
"""
- typecodes = [(isinstance(t, str) and t) or asarray(t).dtype.char
- for t in typechars]
- intersection = [t for t in typecodes if t in typeset]
+ typecodes = ((isinstance(t, str) and t) or asarray(t).dtype.char
+ for t in typechars)
+ intersection = set(t for t in typecodes if t in typeset)
if not intersection:
return default
if 'F' in intersection and 'd' in intersection:
return 'D'
- l = [(_typecodes_by_elsize.index(t), t) for t in intersection]
- l.sort()
- return l[0][1]
+ return min(intersection, key=_typecodes_by_elsize.index)
def _asfarray_dispatcher(a, dtype=None):
@@ -495,7 +492,8 @@ def _real_if_close_dispatcher(a, tol=None):
@array_function_dispatch(_real_if_close_dispatcher)
def real_if_close(a, tol=100):
"""
- If complex input returns a real array if complex parts are close to zero.
+ If input is complex with all imaginary parts close to zero, return
+ real parts.
"Close to zero" is defined as `tol` * (machine epsilon of the type for
`a`).
@@ -530,10 +528,10 @@ def real_if_close(a, tol=100):
>>> np.finfo(float).eps
2.2204460492503131e-16 # may vary
- >>> np.real_if_close([2.1 + 4e-14j], tol=1000)
- array([2.1])
- >>> np.real_if_close([2.1 + 4e-13j], tol=1000)
- array([2.1+4.e-13j])
+ >>> np.real_if_close([2.1 + 4e-14j, 5.2 + 3e-15j], tol=1000)
+ array([2.1, 5.2])
+ >>> np.real_if_close([2.1 + 4e-13j, 5.2 + 3e-15j], tol=1000)
+    array([2.1+4.e-13j, 5.2+3.e-15j])
"""
a = asanyarray(a)
diff --git a/numpy/lib/ufunclike.py b/numpy/lib/ufunclike.py
index 96fd5b319..8512669c2 100644
--- a/numpy/lib/ufunclike.py
+++ b/numpy/lib/ufunclike.py
@@ -3,8 +3,6 @@ Module of functions that are like ufuncs in acting on arrays and optionally
storing results in an output array.
"""
-from __future__ import division, absolute_import, print_function
-
__all__ = ['fix', 'isneginf', 'isposinf']
import numpy.core.numeric as nx
@@ -85,13 +83,20 @@ def fix(x, out=None):
----------
x : array_like
An array of floats to be rounded
- y : ndarray, optional
- Output array
+ out : ndarray, optional
+ A location into which the result is stored. If provided, it must have
+ a shape that the input broadcasts to. If not provided or None, a
+ freshly-allocated array is returned.
Returns
-------
out : ndarray of floats
- The array of rounded numbers
+ A float array with the same dimensions as the input.
+ If second argument is not supplied then a float array is returned
+ with the rounded values.
+
+ If a second argument is supplied the result is stored there.
+ The return value `out` is then a reference to that array.
See Also
--------
@@ -129,8 +134,10 @@ def isposinf(x, out=None):
----------
x : array_like
The input array.
- y : array_like, optional
- A boolean array with the same shape as `x` to store the result.
+ out : array_like, optional
+ A location into which the result is stored. If provided, it must have a
+ shape that the input broadcasts to. If not provided or None, a
+ freshly-allocated boolean array is returned.
Returns
-------
@@ -199,8 +206,9 @@ def isneginf(x, out=None):
x : array_like
The input array.
out : array_like, optional
- A boolean array with the same shape and type as `x` to store the
- result.
+ A location into which the result is stored. If provided, it must have a
+ shape that the input broadcasts to. If not provided or None, a
+ freshly-allocated boolean array is returned.
Returns
-------
diff --git a/numpy/lib/user_array.py b/numpy/lib/user_array.py
index f1510a7b1..0e96b477e 100644
--- a/numpy/lib/user_array.py
+++ b/numpy/lib/user_array.py
@@ -5,18 +5,15 @@ Try to inherit from the ndarray instead of using this class as this is not
complete.
"""
-from __future__ import division, absolute_import, print_function
-
from numpy.core import (
array, asarray, absolute, add, subtract, multiply, divide,
remainder, power, left_shift, right_shift, bitwise_and, bitwise_or,
bitwise_xor, invert, less, less_equal, not_equal, equal, greater,
greater_equal, shape, reshape, arange, sin, sqrt, transpose
)
-from numpy.compat import long
-class container(object):
+class container:
"""
container(data, dtype=None, copy=True)
@@ -198,9 +195,6 @@ class container(object):
def __int__(self):
return self._scalarfunc(int)
- def __long__(self):
- return self._scalarfunc(long)
-
def __hex__(self):
return self._scalarfunc(hex)
@@ -233,6 +227,10 @@ class container(object):
""
return self.array.tostring()
+ def tobytes(self):
+ ""
+ return self.array.tobytes()
+
def byteswap(self):
""
return self._rc(self.array.byteswap())
diff --git a/numpy/lib/utils.py b/numpy/lib/utils.py
index 3c71d2a7c..f233c7240 100644
--- a/numpy/lib/utils.py
+++ b/numpy/lib/utils.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import os
import sys
import types
@@ -55,7 +53,7 @@ def _set_function_name(func, name):
return func
-class _Deprecate(object):
+class _Deprecate:
"""
Decorator class to deprecate old functions.
@@ -607,41 +605,6 @@ def info(object=None, maxwidth=76, output=sys.stdout, toplevel='numpy'):
)
print(" %s -- %s" % (meth, methstr), file=output)
- elif (sys.version_info[0] < 3
- and isinstance(object, types.InstanceType)):
- # check for __call__ method
- # types.InstanceType is the type of the instances of oldstyle classes
- print("Instance of class: ", object.__class__.__name__, file=output)
- print(file=output)
- if hasattr(object, '__call__'):
- arguments = formatargspec(
- *getargspec(object.__call__.__func__)
- )
- arglist = arguments.split(', ')
- if len(arglist) > 1:
- arglist[1] = "("+arglist[1]
- arguments = ", ".join(arglist[1:])
- else:
- arguments = "()"
-
- if hasattr(object, 'name'):
- name = "%s" % object.name
- else:
- name = "<name>"
- if len(name+arguments) > maxwidth:
- argstr = _split_line(name, arguments, maxwidth)
- else:
- argstr = name + arguments
-
- print(" " + argstr + "\n", file=output)
- doc = inspect.getdoc(object.__call__)
- if doc is not None:
- print(inspect.getdoc(object.__call__), file=output)
- print(inspect.getdoc(object), file=output)
-
- else:
- print(inspect.getdoc(object), file=output)
-
elif inspect.ismethod(object):
name = object.__name__
arguments = formatargspec(
@@ -869,15 +832,10 @@ def _lookfor_generate_cache(module, import_modules, regenerate):
or newly generated.
"""
- global _lookfor_caches
# Local import to speed up numpy's import time.
import inspect
- if sys.version_info[0] >= 3:
- # In Python3 stderr, stdout are text files.
- from io import StringIO
- else:
- from StringIO import StringIO
+ from io import StringIO
if module is None:
module = "numpy"
diff --git a/numpy/linalg/__init__.py b/numpy/linalg/__init__.py
index 55560815d..3a53ac6ec 100644
--- a/numpy/linalg/__init__.py
+++ b/numpy/linalg/__init__.py
@@ -69,8 +69,6 @@ Exceptions
LinAlgError
"""
-from __future__ import division, absolute_import, print_function
-
# To get sub-modules
from .linalg import *
diff --git a/numpy/linalg/lapack_lite/README.rst b/numpy/linalg/lapack_lite/README.rst
index 1343d25f8..ed738ab86 100644
--- a/numpy/linalg/lapack_lite/README.rst
+++ b/numpy/linalg/lapack_lite/README.rst
@@ -18,9 +18,9 @@ and is unlikely to ever be ported to python 3.
The routines that ``lapack_litemodule.c`` wraps are listed in
``wrapped_routines``, along with a few exceptions that aren't picked up
properly. Assuming that you have an unpacked LAPACK source tree in
-``~/LAPACK``, you generate the new routines in a directory ``new-lite/`` with::
+``~/LAPACK``, you generate the new routines in this directory with::
-$ python2 ./make_lite.py wrapped_routines ~/LAPACK new-lite/
+$ python ./make_lite.py wrapped_routines ~/LAPACK
This will grab the right routines, with dependencies, put them into the
appropriate ``f2c_*.f`` files, run ``f2c`` over them, then do some scrubbing
diff --git a/numpy/linalg/lapack_lite/clapack_scrub.py b/numpy/linalg/lapack_lite/clapack_scrub.py
index 434586113..f3e7d25d2 100644
--- a/numpy/linalg/lapack_lite/clapack_scrub.py
+++ b/numpy/linalg/lapack_lite/clapack_scrub.py
@@ -1,17 +1,12 @@
-#!/usr/bin/env python
-from __future__ import division, absolute_import, print_function
-
-import sys, os
+#!/usr/bin/env python3
+import os
import re
+import sys
+from io import StringIO
+
from plex import Scanner, Str, Lexicon, Opt, Bol, State, AnyChar, TEXT, IGNORE
from plex.traditional import re as Re
-PY2 = sys.version_info < (3, 0)
-
-if PY2:
- from io import BytesIO as UStringIO
-else:
- from io import StringIO as UStringIO
class MyScanner(Scanner):
def __init__(self, info, name='<default>'):
@@ -27,8 +22,8 @@ def sep_seq(sequence, sep):
return pat
def runScanner(data, scanner_class, lexicon=None):
- info = UStringIO(data)
- outfo = UStringIO()
+ info = StringIO(data)
+ outfo = StringIO()
if lexicon is not None:
scanner = scanner_class(lexicon, info)
else:
@@ -106,7 +101,7 @@ def cleanSource(source):
source = re.sub(r'\n\n\n\n+', r'\n\n\n', source)
return source
-class LineQueue(object):
+class LineQueue:
def __init__(self):
object.__init__(self)
self._queue = []
@@ -195,7 +190,7 @@ def cleanComments(source):
return SourceLines
state = SourceLines
- for line in UStringIO(source):
+ for line in StringIO(source):
state = state(line)
comments.flushTo(lines)
return lines.getValue()
@@ -223,7 +218,7 @@ def removeHeader(source):
return OutOfHeader
state = LookingForHeader
- for line in UStringIO(source):
+ for line in StringIO(source):
state = state(line)
return lines.getValue()
@@ -232,7 +227,7 @@ def removeSubroutinePrototypes(source):
r'/[*] Subroutine [*]/^\s*(?:(?:inline|static)\s+){0,2}(?!else|typedef|return)\w+\s+\*?\s*(\w+)\s*\([^0]+\)\s*;?'
)
lines = LineQueue()
- for line in UStringIO(source):
+ for line in StringIO(source):
if not expression.match(line):
lines.add(line)
@@ -254,7 +249,7 @@ def removeBuiltinFunctions(source):
return InBuiltInFunctions
state = LookingForBuiltinFunctions
- for line in UStringIO(source):
+ for line in StringIO(source):
state = state(line)
return lines.getValue()
diff --git a/numpy/linalg/lapack_lite/f2c.c b/numpy/linalg/lapack_lite/f2c.c
index 1114bef3b..9a1e9cec1 100644
--- a/numpy/linalg/lapack_lite/f2c.c
+++ b/numpy/linalg/lapack_lite/f2c.c
@@ -567,7 +567,7 @@ if( (abi = b->i) < 0.f)
abi = - abi;
if( abr <= abi )
{
- /*Let IEEE Infinties handle this ;( */
+ /*Let IEEE Infinities handle this ;( */
/*if(abi == 0)
sig_die("complex division by zero", 1);*/
ratio = b->r / b->i ;
@@ -603,7 +603,7 @@ if( (abi = b->i) < 0.)
abi = - abi;
if( abr <= abi )
{
- /*Let IEEE Infinties handle this ;( */
+ /*Let IEEE Infinities handle this ;( */
/*if(abi == 0)
sig_die("complex division by zero", 1);*/
ratio = b->r / b->i ;
diff --git a/numpy/linalg/lapack_lite/f2c.h b/numpy/linalg/lapack_lite/f2c.h
index 80f1a12b1..d3fbfc177 100644
--- a/numpy/linalg/lapack_lite/f2c.h
+++ b/numpy/linalg/lapack_lite/f2c.h
@@ -8,15 +8,19 @@
#define F2C_INCLUDE
#include <math.h>
+#include "numpy/npy_common.h"
+#include "npy_cblas.h"
-typedef int integer;
+#include "lapack_lite_names.h"
+
+typedef CBLAS_INT integer;
typedef char *address;
typedef short int shortint;
typedef float real;
typedef double doublereal;
typedef struct { real r, i; } complex;
typedef struct { doublereal r, i; } doublecomplex;
-typedef int logical;
+typedef CBLAS_INT logical;
typedef short int shortlogical;
typedef char logical1;
typedef char integer1;
@@ -37,9 +41,9 @@ typedef short flag;
typedef short ftnlen;
typedef short ftnint;
#else
-typedef int flag;
-typedef int ftnlen;
-typedef int ftnint;
+typedef CBLAS_INT flag;
+typedef CBLAS_INT ftnlen;
+typedef CBLAS_INT ftnint;
#endif
/*external read, write*/
@@ -352,7 +356,7 @@ extern void s_copy(char *, char *, ftnlen, ftnlen);
extern int s_paus(char *, ftnlen);
extern integer s_rdfe(cilist *);
extern integer s_rdue(cilist *);
-extern integer s_rnge(char *, integer, char *, integer);
+extern int s_rnge(char *, int, char *, int);
extern integer s_rsfe(cilist *);
extern integer s_rsfi(icilist *);
extern integer s_rsle(cilist *);
@@ -381,6 +385,9 @@ extern void z_log(doublecomplex *, doublecomplex *);
extern void z_sin(doublecomplex *, doublecomplex *);
extern void z_sqrt(doublecomplex *, doublecomplex *);
+extern double f__cabs(double, double);
+extern double f__cabsf(float, float);
+
#ifdef __cplusplus
}
#endif
diff --git a/numpy/linalg/lapack_lite/f2c_blas.c b/numpy/linalg/lapack_lite/f2c_blas.c
index 3af506b71..65286892f 100644
--- a/numpy/linalg/lapack_lite/f2c_blas.c
+++ b/numpy/linalg/lapack_lite/f2c_blas.c
@@ -1,7 +1,7 @@
/*
-NOTE: This is generated code. Look in Misc/lapack_lite for information on
- remaking this file.
-*/
+ * NOTE: This is generated code. Look in numpy/linalg/lapack_lite for
+ * information on remaking this file.
+ */
#include "f2c.h"
#ifdef HAVE_CONFIG
diff --git a/numpy/linalg/lapack_lite/f2c_c_lapack.c b/numpy/linalg/lapack_lite/f2c_c_lapack.c
index f52e1e157..c36c0e368 100644
--- a/numpy/linalg/lapack_lite/f2c_c_lapack.c
+++ b/numpy/linalg/lapack_lite/f2c_c_lapack.c
@@ -1,7 +1,7 @@
/*
-NOTE: This is generated code. Look in Misc/lapack_lite for information on
- remaking this file.
-*/
+ * NOTE: This is generated code. Look in numpy/linalg/lapack_lite for
+ * information on remaking this file.
+ */
#include "f2c.h"
#ifdef HAVE_CONFIG
diff --git a/numpy/linalg/lapack_lite/f2c_config.c b/numpy/linalg/lapack_lite/f2c_config.c
index 2fe608227..3f59e0263 100644
--- a/numpy/linalg/lapack_lite/f2c_config.c
+++ b/numpy/linalg/lapack_lite/f2c_config.c
@@ -1,7 +1,7 @@
/*
-NOTE: This is generated code. Look in Misc/lapack_lite for information on
- remaking this file.
-*/
+ * NOTE: This is generated code. Look in numpy/linalg/lapack_lite for
+ * information on remaking this file.
+ */
#include "f2c.h"
#ifdef HAVE_CONFIG
diff --git a/numpy/linalg/lapack_lite/f2c_d_lapack.c b/numpy/linalg/lapack_lite/f2c_d_lapack.c
index 1a6675ef1..233db74b9 100644
--- a/numpy/linalg/lapack_lite/f2c_d_lapack.c
+++ b/numpy/linalg/lapack_lite/f2c_d_lapack.c
@@ -1,7 +1,7 @@
/*
-NOTE: This is generated code. Look in Misc/lapack_lite for information on
- remaking this file.
-*/
+ * NOTE: This is generated code. Look in numpy/linalg/lapack_lite for
+ * information on remaking this file.
+ */
#include "f2c.h"
#ifdef HAVE_CONFIG
diff --git a/numpy/linalg/lapack_lite/f2c_lapack.c b/numpy/linalg/lapack_lite/f2c_lapack.c
index d956ddbbb..752261044 100644
--- a/numpy/linalg/lapack_lite/f2c_lapack.c
+++ b/numpy/linalg/lapack_lite/f2c_lapack.c
@@ -1,7 +1,7 @@
/*
-NOTE: This is generated code. Look in Misc/lapack_lite for information on
- remaking this file.
-*/
+ * NOTE: This is generated code. Look in numpy/linalg/lapack_lite for
+ * information on remaking this file.
+ */
#include "f2c.h"
#ifdef HAVE_CONFIG
diff --git a/numpy/linalg/lapack_lite/f2c_s_lapack.c b/numpy/linalg/lapack_lite/f2c_s_lapack.c
index fccb1f58b..2a32315c7 100644
--- a/numpy/linalg/lapack_lite/f2c_s_lapack.c
+++ b/numpy/linalg/lapack_lite/f2c_s_lapack.c
@@ -1,7 +1,7 @@
/*
-NOTE: This is generated code. Look in Misc/lapack_lite for information on
- remaking this file.
-*/
+ * NOTE: This is generated code. Look in numpy/linalg/lapack_lite for
+ * information on remaking this file.
+ */
#include "f2c.h"
#ifdef HAVE_CONFIG
diff --git a/numpy/linalg/lapack_lite/f2c_z_lapack.c b/numpy/linalg/lapack_lite/f2c_z_lapack.c
index 0f11f2e72..8234eca41 100644
--- a/numpy/linalg/lapack_lite/f2c_z_lapack.c
+++ b/numpy/linalg/lapack_lite/f2c_z_lapack.c
@@ -1,7 +1,7 @@
/*
-NOTE: This is generated code. Look in Misc/lapack_lite for information on
- remaking this file.
-*/
+ * NOTE: This is generated code. Look in numpy/linalg/lapack_lite for
+ * information on remaking this file.
+ */
#include "f2c.h"
#ifdef HAVE_CONFIG
diff --git a/numpy/linalg/lapack_lite/fortran.py b/numpy/linalg/lapack_lite/fortran.py
index dc0a5ebd9..3aaefb92f 100644
--- a/numpy/linalg/lapack_lite/fortran.py
+++ b/numpy/linalg/lapack_lite/fortran.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import re
import itertools
@@ -14,7 +12,7 @@ def isContinuation(line):
COMMENT, STATEMENT, CONTINUATION = 0, 1, 2
def lineType(line):
- """Return the type of a line of Fortan code."""
+ """Return the type of a line of Fortran code."""
if isBlank(line):
return COMMENT
elif isLabel(line):
@@ -26,7 +24,7 @@ def lineType(line):
else:
return STATEMENT
-class LineIterator(object):
+class LineIterator:
"""LineIterator(iterable)
Return rstrip()'d lines from iterable, while keeping a count of the
@@ -46,10 +44,8 @@ class LineIterator(object):
line = line.rstrip()
return line
- next = __next__
-
-class PushbackIterator(object):
+class PushbackIterator:
"""PushbackIterator(iterable)
Return an iterator for which items can be pushed back into.
@@ -73,8 +69,6 @@ class PushbackIterator(object):
def pushback(self, item):
self.buffer.append(item)
- next = __next__
-
def fortranSourceLines(fo):
"""Return an iterator over statement lines of a Fortran source file.
diff --git a/numpy/linalg/lapack_lite/lapack_lite_names.h b/numpy/linalg/lapack_lite/lapack_lite_names.h
new file mode 100644
index 000000000..08fd7257d
--- /dev/null
+++ b/numpy/linalg/lapack_lite/lapack_lite_names.h
@@ -0,0 +1,691 @@
+/*
+ * NOTE: This is generated code. Look in numpy/linalg/lapack_lite for
+ * information on remaking this file.
+ */
+/*
+ * This file renames all BLAS/LAPACK and f2c symbols to avoid
+ * dynamic symbol name conflicts, in cases where e.g.
+ * integer sizes do not match with 'standard' ABI.
+ */
+#define caxpy_ BLAS_FUNC(caxpy)
+#define ccopy_ BLAS_FUNC(ccopy)
+#define cdotc_ BLAS_FUNC(cdotc)
+#define cdotu_ BLAS_FUNC(cdotu)
+#define cgebak_ BLAS_FUNC(cgebak)
+#define cgebal_ BLAS_FUNC(cgebal)
+#define cgebd2_ BLAS_FUNC(cgebd2)
+#define cgebrd_ BLAS_FUNC(cgebrd)
+#define cgeev_ BLAS_FUNC(cgeev)
+#define cgehd2_ BLAS_FUNC(cgehd2)
+#define cgehrd_ BLAS_FUNC(cgehrd)
+#define cgelq2_ BLAS_FUNC(cgelq2)
+#define cgelqf_ BLAS_FUNC(cgelqf)
+#define cgelsd_ BLAS_FUNC(cgelsd)
+#define cgemm_ BLAS_FUNC(cgemm)
+#define cgemv_ BLAS_FUNC(cgemv)
+#define cgeqr2_ BLAS_FUNC(cgeqr2)
+#define cgeqrf_ BLAS_FUNC(cgeqrf)
+#define cgerc_ BLAS_FUNC(cgerc)
+#define cgeru_ BLAS_FUNC(cgeru)
+#define cgesdd_ BLAS_FUNC(cgesdd)
+#define cgesv_ BLAS_FUNC(cgesv)
+#define cgetf2_ BLAS_FUNC(cgetf2)
+#define cgetrf_ BLAS_FUNC(cgetrf)
+#define cgetrs_ BLAS_FUNC(cgetrs)
+#define cheevd_ BLAS_FUNC(cheevd)
+#define chemv_ BLAS_FUNC(chemv)
+#define cher2_ BLAS_FUNC(cher2)
+#define cher2k_ BLAS_FUNC(cher2k)
+#define cherk_ BLAS_FUNC(cherk)
+#define chetd2_ BLAS_FUNC(chetd2)
+#define chetrd_ BLAS_FUNC(chetrd)
+#define chseqr_ BLAS_FUNC(chseqr)
+#define clabrd_ BLAS_FUNC(clabrd)
+#define clacgv_ BLAS_FUNC(clacgv)
+#define clacp2_ BLAS_FUNC(clacp2)
+#define clacpy_ BLAS_FUNC(clacpy)
+#define clacrm_ BLAS_FUNC(clacrm)
+#define cladiv_ BLAS_FUNC(cladiv)
+#define claed0_ BLAS_FUNC(claed0)
+#define claed7_ BLAS_FUNC(claed7)
+#define claed8_ BLAS_FUNC(claed8)
+#define clahqr_ BLAS_FUNC(clahqr)
+#define clahr2_ BLAS_FUNC(clahr2)
+#define clals0_ BLAS_FUNC(clals0)
+#define clalsa_ BLAS_FUNC(clalsa)
+#define clalsd_ BLAS_FUNC(clalsd)
+#define clange_ BLAS_FUNC(clange)
+#define clanhe_ BLAS_FUNC(clanhe)
+#define claqr0_ BLAS_FUNC(claqr0)
+#define claqr1_ BLAS_FUNC(claqr1)
+#define claqr2_ BLAS_FUNC(claqr2)
+#define claqr3_ BLAS_FUNC(claqr3)
+#define claqr4_ BLAS_FUNC(claqr4)
+#define claqr5_ BLAS_FUNC(claqr5)
+#define clarcm_ BLAS_FUNC(clarcm)
+#define clarf_ BLAS_FUNC(clarf)
+#define clarfb_ BLAS_FUNC(clarfb)
+#define clarfg_ BLAS_FUNC(clarfg)
+#define clarft_ BLAS_FUNC(clarft)
+#define clartg_ BLAS_FUNC(clartg)
+#define clascl_ BLAS_FUNC(clascl)
+#define claset_ BLAS_FUNC(claset)
+#define clasr_ BLAS_FUNC(clasr)
+#define classq_ BLAS_FUNC(classq)
+#define claswp_ BLAS_FUNC(claswp)
+#define clatrd_ BLAS_FUNC(clatrd)
+#define clatrs_ BLAS_FUNC(clatrs)
+#define clauu2_ BLAS_FUNC(clauu2)
+#define clauum_ BLAS_FUNC(clauum)
+#define cpotf2_ BLAS_FUNC(cpotf2)
+#define cpotrf_ BLAS_FUNC(cpotrf)
+#define cpotri_ BLAS_FUNC(cpotri)
+#define cpotrs_ BLAS_FUNC(cpotrs)
+#define crot_ BLAS_FUNC(crot)
+#define cscal_ BLAS_FUNC(cscal)
+#define csrot_ BLAS_FUNC(csrot)
+#define csscal_ BLAS_FUNC(csscal)
+#define cstedc_ BLAS_FUNC(cstedc)
+#define csteqr_ BLAS_FUNC(csteqr)
+#define cswap_ BLAS_FUNC(cswap)
+#define ctrevc_ BLAS_FUNC(ctrevc)
+#define ctrexc_ BLAS_FUNC(ctrexc)
+#define ctrmm_ BLAS_FUNC(ctrmm)
+#define ctrmv_ BLAS_FUNC(ctrmv)
+#define ctrsm_ BLAS_FUNC(ctrsm)
+#define ctrsv_ BLAS_FUNC(ctrsv)
+#define ctrti2_ BLAS_FUNC(ctrti2)
+#define ctrtri_ BLAS_FUNC(ctrtri)
+#define cung2r_ BLAS_FUNC(cung2r)
+#define cungbr_ BLAS_FUNC(cungbr)
+#define cunghr_ BLAS_FUNC(cunghr)
+#define cungl2_ BLAS_FUNC(cungl2)
+#define cunglq_ BLAS_FUNC(cunglq)
+#define cungqr_ BLAS_FUNC(cungqr)
+#define cunm2l_ BLAS_FUNC(cunm2l)
+#define cunm2r_ BLAS_FUNC(cunm2r)
+#define cunmbr_ BLAS_FUNC(cunmbr)
+#define cunmhr_ BLAS_FUNC(cunmhr)
+#define cunml2_ BLAS_FUNC(cunml2)
+#define cunmlq_ BLAS_FUNC(cunmlq)
+#define cunmql_ BLAS_FUNC(cunmql)
+#define cunmqr_ BLAS_FUNC(cunmqr)
+#define cunmtr_ BLAS_FUNC(cunmtr)
+#define daxpy_ BLAS_FUNC(daxpy)
+#define dbdsdc_ BLAS_FUNC(dbdsdc)
+#define dbdsqr_ BLAS_FUNC(dbdsqr)
+#define dcabs1_ BLAS_FUNC(dcabs1)
+#define dcopy_ BLAS_FUNC(dcopy)
+#define ddot_ BLAS_FUNC(ddot)
+#define dgebak_ BLAS_FUNC(dgebak)
+#define dgebal_ BLAS_FUNC(dgebal)
+#define dgebd2_ BLAS_FUNC(dgebd2)
+#define dgebrd_ BLAS_FUNC(dgebrd)
+#define dgeev_ BLAS_FUNC(dgeev)
+#define dgehd2_ BLAS_FUNC(dgehd2)
+#define dgehrd_ BLAS_FUNC(dgehrd)
+#define dgelq2_ BLAS_FUNC(dgelq2)
+#define dgelqf_ BLAS_FUNC(dgelqf)
+#define dgelsd_ BLAS_FUNC(dgelsd)
+#define dgemm_ BLAS_FUNC(dgemm)
+#define dgemv_ BLAS_FUNC(dgemv)
+#define dgeqr2_ BLAS_FUNC(dgeqr2)
+#define dgeqrf_ BLAS_FUNC(dgeqrf)
+#define dger_ BLAS_FUNC(dger)
+#define dgesdd_ BLAS_FUNC(dgesdd)
+#define dgesv_ BLAS_FUNC(dgesv)
+#define dgetf2_ BLAS_FUNC(dgetf2)
+#define dgetrf_ BLAS_FUNC(dgetrf)
+#define dgetrs_ BLAS_FUNC(dgetrs)
+#define dhseqr_ BLAS_FUNC(dhseqr)
+#define disnan_ BLAS_FUNC(disnan)
+#define dlabad_ BLAS_FUNC(dlabad)
+#define dlabrd_ BLAS_FUNC(dlabrd)
+#define dlacpy_ BLAS_FUNC(dlacpy)
+#define dladiv_ BLAS_FUNC(dladiv)
+#define dlae2_ BLAS_FUNC(dlae2)
+#define dlaed0_ BLAS_FUNC(dlaed0)
+#define dlaed1_ BLAS_FUNC(dlaed1)
+#define dlaed2_ BLAS_FUNC(dlaed2)
+#define dlaed3_ BLAS_FUNC(dlaed3)
+#define dlaed4_ BLAS_FUNC(dlaed4)
+#define dlaed5_ BLAS_FUNC(dlaed5)
+#define dlaed6_ BLAS_FUNC(dlaed6)
+#define dlaed7_ BLAS_FUNC(dlaed7)
+#define dlaed8_ BLAS_FUNC(dlaed8)
+#define dlaed9_ BLAS_FUNC(dlaed9)
+#define dlaeda_ BLAS_FUNC(dlaeda)
+#define dlaev2_ BLAS_FUNC(dlaev2)
+#define dlaexc_ BLAS_FUNC(dlaexc)
+#define dlahqr_ BLAS_FUNC(dlahqr)
+#define dlahr2_ BLAS_FUNC(dlahr2)
+#define dlaisnan_ BLAS_FUNC(dlaisnan)
+#define dlaln2_ BLAS_FUNC(dlaln2)
+#define dlals0_ BLAS_FUNC(dlals0)
+#define dlalsa_ BLAS_FUNC(dlalsa)
+#define dlalsd_ BLAS_FUNC(dlalsd)
+#define dlamc1_ BLAS_FUNC(dlamc1)
+#define dlamc2_ BLAS_FUNC(dlamc2)
+#define dlamc3_ BLAS_FUNC(dlamc3)
+#define dlamc4_ BLAS_FUNC(dlamc4)
+#define dlamc5_ BLAS_FUNC(dlamc5)
+#define dlamch_ BLAS_FUNC(dlamch)
+#define dlamrg_ BLAS_FUNC(dlamrg)
+#define dlange_ BLAS_FUNC(dlange)
+#define dlanst_ BLAS_FUNC(dlanst)
+#define dlansy_ BLAS_FUNC(dlansy)
+#define dlanv2_ BLAS_FUNC(dlanv2)
+#define dlapy2_ BLAS_FUNC(dlapy2)
+#define dlapy3_ BLAS_FUNC(dlapy3)
+#define dlaqr0_ BLAS_FUNC(dlaqr0)
+#define dlaqr1_ BLAS_FUNC(dlaqr1)
+#define dlaqr2_ BLAS_FUNC(dlaqr2)
+#define dlaqr3_ BLAS_FUNC(dlaqr3)
+#define dlaqr4_ BLAS_FUNC(dlaqr4)
+#define dlaqr5_ BLAS_FUNC(dlaqr5)
+#define dlarf_ BLAS_FUNC(dlarf)
+#define dlarfb_ BLAS_FUNC(dlarfb)
+#define dlarfg_ BLAS_FUNC(dlarfg)
+#define dlarft_ BLAS_FUNC(dlarft)
+#define dlarfx_ BLAS_FUNC(dlarfx)
+#define dlartg_ BLAS_FUNC(dlartg)
+#define dlas2_ BLAS_FUNC(dlas2)
+#define dlascl_ BLAS_FUNC(dlascl)
+#define dlasd0_ BLAS_FUNC(dlasd0)
+#define dlasd1_ BLAS_FUNC(dlasd1)
+#define dlasd2_ BLAS_FUNC(dlasd2)
+#define dlasd3_ BLAS_FUNC(dlasd3)
+#define dlasd4_ BLAS_FUNC(dlasd4)
+#define dlasd5_ BLAS_FUNC(dlasd5)
+#define dlasd6_ BLAS_FUNC(dlasd6)
+#define dlasd7_ BLAS_FUNC(dlasd7)
+#define dlasd8_ BLAS_FUNC(dlasd8)
+#define dlasda_ BLAS_FUNC(dlasda)
+#define dlasdq_ BLAS_FUNC(dlasdq)
+#define dlasdt_ BLAS_FUNC(dlasdt)
+#define dlaset_ BLAS_FUNC(dlaset)
+#define dlasq1_ BLAS_FUNC(dlasq1)
+#define dlasq2_ BLAS_FUNC(dlasq2)
+#define dlasq3_ BLAS_FUNC(dlasq3)
+#define dlasq4_ BLAS_FUNC(dlasq4)
+#define dlasq5_ BLAS_FUNC(dlasq5)
+#define dlasq6_ BLAS_FUNC(dlasq6)
+#define dlasr_ BLAS_FUNC(dlasr)
+#define dlasrt_ BLAS_FUNC(dlasrt)
+#define dlassq_ BLAS_FUNC(dlassq)
+#define dlasv2_ BLAS_FUNC(dlasv2)
+#define dlaswp_ BLAS_FUNC(dlaswp)
+#define dlasy2_ BLAS_FUNC(dlasy2)
+#define dlatrd_ BLAS_FUNC(dlatrd)
+#define dlauu2_ BLAS_FUNC(dlauu2)
+#define dlauum_ BLAS_FUNC(dlauum)
+#define dnrm2_ BLAS_FUNC(dnrm2)
+#define dorg2r_ BLAS_FUNC(dorg2r)
+#define dorgbr_ BLAS_FUNC(dorgbr)
+#define dorghr_ BLAS_FUNC(dorghr)
+#define dorgl2_ BLAS_FUNC(dorgl2)
+#define dorglq_ BLAS_FUNC(dorglq)
+#define dorgqr_ BLAS_FUNC(dorgqr)
+#define dorm2l_ BLAS_FUNC(dorm2l)
+#define dorm2r_ BLAS_FUNC(dorm2r)
+#define dormbr_ BLAS_FUNC(dormbr)
+#define dormhr_ BLAS_FUNC(dormhr)
+#define dorml2_ BLAS_FUNC(dorml2)
+#define dormlq_ BLAS_FUNC(dormlq)
+#define dormql_ BLAS_FUNC(dormql)
+#define dormqr_ BLAS_FUNC(dormqr)
+#define dormtr_ BLAS_FUNC(dormtr)
+#define dpotf2_ BLAS_FUNC(dpotf2)
+#define dpotrf_ BLAS_FUNC(dpotrf)
+#define dpotri_ BLAS_FUNC(dpotri)
+#define dpotrs_ BLAS_FUNC(dpotrs)
+#define drot_ BLAS_FUNC(drot)
+#define dscal_ BLAS_FUNC(dscal)
+#define dstedc_ BLAS_FUNC(dstedc)
+#define dsteqr_ BLAS_FUNC(dsteqr)
+#define dsterf_ BLAS_FUNC(dsterf)
+#define dswap_ BLAS_FUNC(dswap)
+#define dsyevd_ BLAS_FUNC(dsyevd)
+#define dsymv_ BLAS_FUNC(dsymv)
+#define dsyr2_ BLAS_FUNC(dsyr2)
+#define dsyr2k_ BLAS_FUNC(dsyr2k)
+#define dsyrk_ BLAS_FUNC(dsyrk)
+#define dsytd2_ BLAS_FUNC(dsytd2)
+#define dsytrd_ BLAS_FUNC(dsytrd)
+#define dtrevc_ BLAS_FUNC(dtrevc)
+#define dtrexc_ BLAS_FUNC(dtrexc)
+#define dtrmm_ BLAS_FUNC(dtrmm)
+#define dtrmv_ BLAS_FUNC(dtrmv)
+#define dtrsm_ BLAS_FUNC(dtrsm)
+#define dtrti2_ BLAS_FUNC(dtrti2)
+#define dtrtri_ BLAS_FUNC(dtrtri)
+#define dzasum_ BLAS_FUNC(dzasum)
+#define dznrm2_ BLAS_FUNC(dznrm2)
+#define icamax_ BLAS_FUNC(icamax)
+#define idamax_ BLAS_FUNC(idamax)
+#define ieeeck_ BLAS_FUNC(ieeeck)
+#define ilaclc_ BLAS_FUNC(ilaclc)
+#define ilaclr_ BLAS_FUNC(ilaclr)
+#define iladlc_ BLAS_FUNC(iladlc)
+#define iladlr_ BLAS_FUNC(iladlr)
+#define ilaenv_ BLAS_FUNC(ilaenv)
+#define ilaslc_ BLAS_FUNC(ilaslc)
+#define ilaslr_ BLAS_FUNC(ilaslr)
+#define ilazlc_ BLAS_FUNC(ilazlc)
+#define ilazlr_ BLAS_FUNC(ilazlr)
+#define iparmq_ BLAS_FUNC(iparmq)
+#define isamax_ BLAS_FUNC(isamax)
+#define izamax_ BLAS_FUNC(izamax)
+#define lsame_ BLAS_FUNC(lsame)
+#define saxpy_ BLAS_FUNC(saxpy)
+#define sbdsdc_ BLAS_FUNC(sbdsdc)
+#define sbdsqr_ BLAS_FUNC(sbdsqr)
+#define scabs1_ BLAS_FUNC(scabs1)
+#define scasum_ BLAS_FUNC(scasum)
+#define scnrm2_ BLAS_FUNC(scnrm2)
+#define scopy_ BLAS_FUNC(scopy)
+#define sdot_ BLAS_FUNC(sdot)
+#define sgebak_ BLAS_FUNC(sgebak)
+#define sgebal_ BLAS_FUNC(sgebal)
+#define sgebd2_ BLAS_FUNC(sgebd2)
+#define sgebrd_ BLAS_FUNC(sgebrd)
+#define sgeev_ BLAS_FUNC(sgeev)
+#define sgehd2_ BLAS_FUNC(sgehd2)
+#define sgehrd_ BLAS_FUNC(sgehrd)
+#define sgelq2_ BLAS_FUNC(sgelq2)
+#define sgelqf_ BLAS_FUNC(sgelqf)
+#define sgelsd_ BLAS_FUNC(sgelsd)
+#define sgemm_ BLAS_FUNC(sgemm)
+#define sgemv_ BLAS_FUNC(sgemv)
+#define sgeqr2_ BLAS_FUNC(sgeqr2)
+#define sgeqrf_ BLAS_FUNC(sgeqrf)
+#define sger_ BLAS_FUNC(sger)
+#define sgesdd_ BLAS_FUNC(sgesdd)
+#define sgesv_ BLAS_FUNC(sgesv)
+#define sgetf2_ BLAS_FUNC(sgetf2)
+#define sgetrf_ BLAS_FUNC(sgetrf)
+#define sgetrs_ BLAS_FUNC(sgetrs)
+#define shseqr_ BLAS_FUNC(shseqr)
+#define sisnan_ BLAS_FUNC(sisnan)
+#define slabad_ BLAS_FUNC(slabad)
+#define slabrd_ BLAS_FUNC(slabrd)
+#define slacpy_ BLAS_FUNC(slacpy)
+#define sladiv_ BLAS_FUNC(sladiv)
+#define slae2_ BLAS_FUNC(slae2)
+#define slaed0_ BLAS_FUNC(slaed0)
+#define slaed1_ BLAS_FUNC(slaed1)
+#define slaed2_ BLAS_FUNC(slaed2)
+#define slaed3_ BLAS_FUNC(slaed3)
+#define slaed4_ BLAS_FUNC(slaed4)
+#define slaed5_ BLAS_FUNC(slaed5)
+#define slaed6_ BLAS_FUNC(slaed6)
+#define slaed7_ BLAS_FUNC(slaed7)
+#define slaed8_ BLAS_FUNC(slaed8)
+#define slaed9_ BLAS_FUNC(slaed9)
+#define slaeda_ BLAS_FUNC(slaeda)
+#define slaev2_ BLAS_FUNC(slaev2)
+#define slaexc_ BLAS_FUNC(slaexc)
+#define slahqr_ BLAS_FUNC(slahqr)
+#define slahr2_ BLAS_FUNC(slahr2)
+#define slaisnan_ BLAS_FUNC(slaisnan)
+#define slaln2_ BLAS_FUNC(slaln2)
+#define slals0_ BLAS_FUNC(slals0)
+#define slalsa_ BLAS_FUNC(slalsa)
+#define slalsd_ BLAS_FUNC(slalsd)
+#define slamc1_ BLAS_FUNC(slamc1)
+#define slamc2_ BLAS_FUNC(slamc2)
+#define slamc3_ BLAS_FUNC(slamc3)
+#define slamc4_ BLAS_FUNC(slamc4)
+#define slamc5_ BLAS_FUNC(slamc5)
+#define slamch_ BLAS_FUNC(slamch)
+#define slamrg_ BLAS_FUNC(slamrg)
+#define slange_ BLAS_FUNC(slange)
+#define slanst_ BLAS_FUNC(slanst)
+#define slansy_ BLAS_FUNC(slansy)
+#define slanv2_ BLAS_FUNC(slanv2)
+#define slapy2_ BLAS_FUNC(slapy2)
+#define slapy3_ BLAS_FUNC(slapy3)
+#define slaqr0_ BLAS_FUNC(slaqr0)
+#define slaqr1_ BLAS_FUNC(slaqr1)
+#define slaqr2_ BLAS_FUNC(slaqr2)
+#define slaqr3_ BLAS_FUNC(slaqr3)
+#define slaqr4_ BLAS_FUNC(slaqr4)
+#define slaqr5_ BLAS_FUNC(slaqr5)
+#define slarf_ BLAS_FUNC(slarf)
+#define slarfb_ BLAS_FUNC(slarfb)
+#define slarfg_ BLAS_FUNC(slarfg)
+#define slarft_ BLAS_FUNC(slarft)
+#define slarfx_ BLAS_FUNC(slarfx)
+#define slartg_ BLAS_FUNC(slartg)
+#define slas2_ BLAS_FUNC(slas2)
+#define slascl_ BLAS_FUNC(slascl)
+#define slasd0_ BLAS_FUNC(slasd0)
+#define slasd1_ BLAS_FUNC(slasd1)
+#define slasd2_ BLAS_FUNC(slasd2)
+#define slasd3_ BLAS_FUNC(slasd3)
+#define slasd4_ BLAS_FUNC(slasd4)
+#define slasd5_ BLAS_FUNC(slasd5)
+#define slasd6_ BLAS_FUNC(slasd6)
+#define slasd7_ BLAS_FUNC(slasd7)
+#define slasd8_ BLAS_FUNC(slasd8)
+#define slasda_ BLAS_FUNC(slasda)
+#define slasdq_ BLAS_FUNC(slasdq)
+#define slasdt_ BLAS_FUNC(slasdt)
+#define slaset_ BLAS_FUNC(slaset)
+#define slasq1_ BLAS_FUNC(slasq1)
+#define slasq2_ BLAS_FUNC(slasq2)
+#define slasq3_ BLAS_FUNC(slasq3)
+#define slasq4_ BLAS_FUNC(slasq4)
+#define slasq5_ BLAS_FUNC(slasq5)
+#define slasq6_ BLAS_FUNC(slasq6)
+#define slasr_ BLAS_FUNC(slasr)
+#define slasrt_ BLAS_FUNC(slasrt)
+#define slassq_ BLAS_FUNC(slassq)
+#define slasv2_ BLAS_FUNC(slasv2)
+#define slaswp_ BLAS_FUNC(slaswp)
+#define slasy2_ BLAS_FUNC(slasy2)
+#define slatrd_ BLAS_FUNC(slatrd)
+#define slauu2_ BLAS_FUNC(slauu2)
+#define slauum_ BLAS_FUNC(slauum)
+#define snrm2_ BLAS_FUNC(snrm2)
+#define sorg2r_ BLAS_FUNC(sorg2r)
+#define sorgbr_ BLAS_FUNC(sorgbr)
+#define sorghr_ BLAS_FUNC(sorghr)
+#define sorgl2_ BLAS_FUNC(sorgl2)
+#define sorglq_ BLAS_FUNC(sorglq)
+#define sorgqr_ BLAS_FUNC(sorgqr)
+#define sorm2l_ BLAS_FUNC(sorm2l)
+#define sorm2r_ BLAS_FUNC(sorm2r)
+#define sormbr_ BLAS_FUNC(sormbr)
+#define sormhr_ BLAS_FUNC(sormhr)
+#define sorml2_ BLAS_FUNC(sorml2)
+#define sormlq_ BLAS_FUNC(sormlq)
+#define sormql_ BLAS_FUNC(sormql)
+#define sormqr_ BLAS_FUNC(sormqr)
+#define sormtr_ BLAS_FUNC(sormtr)
+#define spotf2_ BLAS_FUNC(spotf2)
+#define spotrf_ BLAS_FUNC(spotrf)
+#define spotri_ BLAS_FUNC(spotri)
+#define spotrs_ BLAS_FUNC(spotrs)
+#define srot_ BLAS_FUNC(srot)
+#define sscal_ BLAS_FUNC(sscal)
+#define sstedc_ BLAS_FUNC(sstedc)
+#define ssteqr_ BLAS_FUNC(ssteqr)
+#define ssterf_ BLAS_FUNC(ssterf)
+#define sswap_ BLAS_FUNC(sswap)
+#define ssyevd_ BLAS_FUNC(ssyevd)
+#define ssymv_ BLAS_FUNC(ssymv)
+#define ssyr2_ BLAS_FUNC(ssyr2)
+#define ssyr2k_ BLAS_FUNC(ssyr2k)
+#define ssyrk_ BLAS_FUNC(ssyrk)
+#define ssytd2_ BLAS_FUNC(ssytd2)
+#define ssytrd_ BLAS_FUNC(ssytrd)
+#define strevc_ BLAS_FUNC(strevc)
+#define strexc_ BLAS_FUNC(strexc)
+#define strmm_ BLAS_FUNC(strmm)
+#define strmv_ BLAS_FUNC(strmv)
+#define strsm_ BLAS_FUNC(strsm)
+#define strti2_ BLAS_FUNC(strti2)
+#define strtri_ BLAS_FUNC(strtri)
+#define xerbla_ BLAS_FUNC(xerbla)
+#define zaxpy_ BLAS_FUNC(zaxpy)
+#define zcopy_ BLAS_FUNC(zcopy)
+#define zdotc_ BLAS_FUNC(zdotc)
+#define zdotu_ BLAS_FUNC(zdotu)
+#define zdrot_ BLAS_FUNC(zdrot)
+#define zdscal_ BLAS_FUNC(zdscal)
+#define zgebak_ BLAS_FUNC(zgebak)
+#define zgebal_ BLAS_FUNC(zgebal)
+#define zgebd2_ BLAS_FUNC(zgebd2)
+#define zgebrd_ BLAS_FUNC(zgebrd)
+#define zgeev_ BLAS_FUNC(zgeev)
+#define zgehd2_ BLAS_FUNC(zgehd2)
+#define zgehrd_ BLAS_FUNC(zgehrd)
+#define zgelq2_ BLAS_FUNC(zgelq2)
+#define zgelqf_ BLAS_FUNC(zgelqf)
+#define zgelsd_ BLAS_FUNC(zgelsd)
+#define zgemm_ BLAS_FUNC(zgemm)
+#define zgemv_ BLAS_FUNC(zgemv)
+#define zgeqr2_ BLAS_FUNC(zgeqr2)
+#define zgeqrf_ BLAS_FUNC(zgeqrf)
+#define zgerc_ BLAS_FUNC(zgerc)
+#define zgeru_ BLAS_FUNC(zgeru)
+#define zgesdd_ BLAS_FUNC(zgesdd)
+#define zgesv_ BLAS_FUNC(zgesv)
+#define zgetf2_ BLAS_FUNC(zgetf2)
+#define zgetrf_ BLAS_FUNC(zgetrf)
+#define zgetrs_ BLAS_FUNC(zgetrs)
+#define zheevd_ BLAS_FUNC(zheevd)
+#define zhemv_ BLAS_FUNC(zhemv)
+#define zher2_ BLAS_FUNC(zher2)
+#define zher2k_ BLAS_FUNC(zher2k)
+#define zherk_ BLAS_FUNC(zherk)
+#define zhetd2_ BLAS_FUNC(zhetd2)
+#define zhetrd_ BLAS_FUNC(zhetrd)
+#define zhseqr_ BLAS_FUNC(zhseqr)
+#define zlabrd_ BLAS_FUNC(zlabrd)
+#define zlacgv_ BLAS_FUNC(zlacgv)
+#define zlacp2_ BLAS_FUNC(zlacp2)
+#define zlacpy_ BLAS_FUNC(zlacpy)
+#define zlacrm_ BLAS_FUNC(zlacrm)
+#define zladiv_ BLAS_FUNC(zladiv)
+#define zlaed0_ BLAS_FUNC(zlaed0)
+#define zlaed7_ BLAS_FUNC(zlaed7)
+#define zlaed8_ BLAS_FUNC(zlaed8)
+#define zlahqr_ BLAS_FUNC(zlahqr)
+#define zlahr2_ BLAS_FUNC(zlahr2)
+#define zlals0_ BLAS_FUNC(zlals0)
+#define zlalsa_ BLAS_FUNC(zlalsa)
+#define zlalsd_ BLAS_FUNC(zlalsd)
+#define zlange_ BLAS_FUNC(zlange)
+#define zlanhe_ BLAS_FUNC(zlanhe)
+#define zlaqr0_ BLAS_FUNC(zlaqr0)
+#define zlaqr1_ BLAS_FUNC(zlaqr1)
+#define zlaqr2_ BLAS_FUNC(zlaqr2)
+#define zlaqr3_ BLAS_FUNC(zlaqr3)
+#define zlaqr4_ BLAS_FUNC(zlaqr4)
+#define zlaqr5_ BLAS_FUNC(zlaqr5)
+#define zlarcm_ BLAS_FUNC(zlarcm)
+#define zlarf_ BLAS_FUNC(zlarf)
+#define zlarfb_ BLAS_FUNC(zlarfb)
+#define zlarfg_ BLAS_FUNC(zlarfg)
+#define zlarft_ BLAS_FUNC(zlarft)
+#define zlartg_ BLAS_FUNC(zlartg)
+#define zlascl_ BLAS_FUNC(zlascl)
+#define zlaset_ BLAS_FUNC(zlaset)
+#define zlasr_ BLAS_FUNC(zlasr)
+#define zlassq_ BLAS_FUNC(zlassq)
+#define zlaswp_ BLAS_FUNC(zlaswp)
+#define zlatrd_ BLAS_FUNC(zlatrd)
+#define zlatrs_ BLAS_FUNC(zlatrs)
+#define zlauu2_ BLAS_FUNC(zlauu2)
+#define zlauum_ BLAS_FUNC(zlauum)
+#define zpotf2_ BLAS_FUNC(zpotf2)
+#define zpotrf_ BLAS_FUNC(zpotrf)
+#define zpotri_ BLAS_FUNC(zpotri)
+#define zpotrs_ BLAS_FUNC(zpotrs)
+#define zrot_ BLAS_FUNC(zrot)
+#define zscal_ BLAS_FUNC(zscal)
+#define zstedc_ BLAS_FUNC(zstedc)
+#define zsteqr_ BLAS_FUNC(zsteqr)
+#define zswap_ BLAS_FUNC(zswap)
+#define ztrevc_ BLAS_FUNC(ztrevc)
+#define ztrexc_ BLAS_FUNC(ztrexc)
+#define ztrmm_ BLAS_FUNC(ztrmm)
+#define ztrmv_ BLAS_FUNC(ztrmv)
+#define ztrsm_ BLAS_FUNC(ztrsm)
+#define ztrsv_ BLAS_FUNC(ztrsv)
+#define ztrti2_ BLAS_FUNC(ztrti2)
+#define ztrtri_ BLAS_FUNC(ztrtri)
+#define zung2r_ BLAS_FUNC(zung2r)
+#define zungbr_ BLAS_FUNC(zungbr)
+#define zunghr_ BLAS_FUNC(zunghr)
+#define zungl2_ BLAS_FUNC(zungl2)
+#define zunglq_ BLAS_FUNC(zunglq)
+#define zungqr_ BLAS_FUNC(zungqr)
+#define zunm2l_ BLAS_FUNC(zunm2l)
+#define zunm2r_ BLAS_FUNC(zunm2r)
+#define zunmbr_ BLAS_FUNC(zunmbr)
+#define zunmhr_ BLAS_FUNC(zunmhr)
+#define zunml2_ BLAS_FUNC(zunml2)
+#define zunmlq_ BLAS_FUNC(zunmlq)
+#define zunmql_ BLAS_FUNC(zunmql)
+#define zunmqr_ BLAS_FUNC(zunmqr)
+#define zunmtr_ BLAS_FUNC(zunmtr)
+
+/* Symbols exported by f2c.c */
+#define abort_ numpy_lapack_lite_abort_
+#define c_abs numpy_lapack_lite_c_abs
+#define c_cos numpy_lapack_lite_c_cos
+#define c_div numpy_lapack_lite_c_div
+#define c_exp numpy_lapack_lite_c_exp
+#define c_log numpy_lapack_lite_c_log
+#define c_sin numpy_lapack_lite_c_sin
+#define c_sqrt numpy_lapack_lite_c_sqrt
+#define d_abs numpy_lapack_lite_d_abs
+#define d_acos numpy_lapack_lite_d_acos
+#define d_asin numpy_lapack_lite_d_asin
+#define d_atan numpy_lapack_lite_d_atan
+#define d_atn2 numpy_lapack_lite_d_atn2
+#define d_cnjg numpy_lapack_lite_d_cnjg
+#define d_cos numpy_lapack_lite_d_cos
+#define d_cosh numpy_lapack_lite_d_cosh
+#define d_dim numpy_lapack_lite_d_dim
+#define d_exp numpy_lapack_lite_d_exp
+#define d_imag numpy_lapack_lite_d_imag
+#define d_int numpy_lapack_lite_d_int
+#define d_lg10 numpy_lapack_lite_d_lg10
+#define d_log numpy_lapack_lite_d_log
+#define d_mod numpy_lapack_lite_d_mod
+#define d_nint numpy_lapack_lite_d_nint
+#define d_prod numpy_lapack_lite_d_prod
+#define d_sign numpy_lapack_lite_d_sign
+#define d_sin numpy_lapack_lite_d_sin
+#define d_sinh numpy_lapack_lite_d_sinh
+#define d_sqrt numpy_lapack_lite_d_sqrt
+#define d_tan numpy_lapack_lite_d_tan
+#define d_tanh numpy_lapack_lite_d_tanh
+#define derf_ numpy_lapack_lite_derf_
+#define derfc_ numpy_lapack_lite_derfc_
+#define do_fio numpy_lapack_lite_do_fio
+#define do_lio numpy_lapack_lite_do_lio
+#define do_uio numpy_lapack_lite_do_uio
+#define e_rdfe numpy_lapack_lite_e_rdfe
+#define e_rdue numpy_lapack_lite_e_rdue
+#define e_rsfe numpy_lapack_lite_e_rsfe
+#define e_rsfi numpy_lapack_lite_e_rsfi
+#define e_rsle numpy_lapack_lite_e_rsle
+#define e_rsli numpy_lapack_lite_e_rsli
+#define e_rsue numpy_lapack_lite_e_rsue
+#define e_wdfe numpy_lapack_lite_e_wdfe
+#define e_wdue numpy_lapack_lite_e_wdue
+#define e_wsfe numpy_lapack_lite_e_wsfe
+#define e_wsfi numpy_lapack_lite_e_wsfi
+#define e_wsle numpy_lapack_lite_e_wsle
+#define e_wsli numpy_lapack_lite_e_wsli
+#define e_wsue numpy_lapack_lite_e_wsue
+#define ef1asc_ numpy_lapack_lite_ef1asc_
+#define ef1cmc_ numpy_lapack_lite_ef1cmc_
+#define erf_ numpy_lapack_lite_erf_
+#define erfc_ numpy_lapack_lite_erfc_
+#define f__cabs numpy_lapack_lite_f__cabs
+#define f__cabsf numpy_lapack_lite_f__cabsf
+#define f_back numpy_lapack_lite_f_back
+#define f_clos numpy_lapack_lite_f_clos
+#define f_end numpy_lapack_lite_f_end
+#define f_exit numpy_lapack_lite_f_exit
+#define f_inqu numpy_lapack_lite_f_inqu
+#define f_open numpy_lapack_lite_f_open
+#define f_rew numpy_lapack_lite_f_rew
+#define flush_ numpy_lapack_lite_flush_
+#define getarg_ numpy_lapack_lite_getarg_
+#define getenv_ numpy_lapack_lite_getenv_
+#define h_abs numpy_lapack_lite_h_abs
+#define h_dim numpy_lapack_lite_h_dim
+#define h_dnnt numpy_lapack_lite_h_dnnt
+#define h_indx numpy_lapack_lite_h_indx
+#define h_len numpy_lapack_lite_h_len
+#define h_mod numpy_lapack_lite_h_mod
+#define h_nint numpy_lapack_lite_h_nint
+#define h_sign numpy_lapack_lite_h_sign
+#define hl_ge numpy_lapack_lite_hl_ge
+#define hl_gt numpy_lapack_lite_hl_gt
+#define hl_le numpy_lapack_lite_hl_le
+#define hl_lt numpy_lapack_lite_hl_lt
+#define i_abs numpy_lapack_lite_i_abs
+#define i_dim numpy_lapack_lite_i_dim
+#define i_dnnt numpy_lapack_lite_i_dnnt
+#define i_indx numpy_lapack_lite_i_indx
+#define i_len numpy_lapack_lite_i_len
+#define i_mod numpy_lapack_lite_i_mod
+#define i_nint numpy_lapack_lite_i_nint
+#define i_sign numpy_lapack_lite_i_sign
+#define iargc_ numpy_lapack_lite_iargc_
+#define l_ge numpy_lapack_lite_l_ge
+#define l_gt numpy_lapack_lite_l_gt
+#define l_le numpy_lapack_lite_l_le
+#define l_lt numpy_lapack_lite_l_lt
+#define pow_ci numpy_lapack_lite_pow_ci
+#define pow_dd numpy_lapack_lite_pow_dd
+#define pow_di numpy_lapack_lite_pow_di
+#define pow_hh numpy_lapack_lite_pow_hh
+#define pow_ii numpy_lapack_lite_pow_ii
+#define pow_ri numpy_lapack_lite_pow_ri
+#define pow_zi numpy_lapack_lite_pow_zi
+#define pow_zz numpy_lapack_lite_pow_zz
+#define r_abs numpy_lapack_lite_r_abs
+#define r_acos numpy_lapack_lite_r_acos
+#define r_asin numpy_lapack_lite_r_asin
+#define r_atan numpy_lapack_lite_r_atan
+#define r_atn2 numpy_lapack_lite_r_atn2
+#define r_cnjg numpy_lapack_lite_r_cnjg
+#define r_cos numpy_lapack_lite_r_cos
+#define r_cosh numpy_lapack_lite_r_cosh
+#define r_dim numpy_lapack_lite_r_dim
+#define r_exp numpy_lapack_lite_r_exp
+#define r_imag numpy_lapack_lite_r_imag
+#define r_int numpy_lapack_lite_r_int
+#define r_lg10 numpy_lapack_lite_r_lg10
+#define r_log numpy_lapack_lite_r_log
+#define r_mod numpy_lapack_lite_r_mod
+#define r_nint numpy_lapack_lite_r_nint
+#define r_sign numpy_lapack_lite_r_sign
+#define r_sin numpy_lapack_lite_r_sin
+#define r_sinh numpy_lapack_lite_r_sinh
+#define r_sqrt numpy_lapack_lite_r_sqrt
+#define r_tan numpy_lapack_lite_r_tan
+#define r_tanh numpy_lapack_lite_r_tanh
+#define s_cat numpy_lapack_lite_s_cat
+#define s_cmp numpy_lapack_lite_s_cmp
+#define s_copy numpy_lapack_lite_s_copy
+#define s_paus numpy_lapack_lite_s_paus
+#define s_rdfe numpy_lapack_lite_s_rdfe
+#define s_rdue numpy_lapack_lite_s_rdue
+#define s_rnge numpy_lapack_lite_s_rnge
+#define s_rsfe numpy_lapack_lite_s_rsfe
+#define s_rsfi numpy_lapack_lite_s_rsfi
+#define s_rsle numpy_lapack_lite_s_rsle
+#define s_rsli numpy_lapack_lite_s_rsli
+#define s_rsne numpy_lapack_lite_s_rsne
+#define s_rsni numpy_lapack_lite_s_rsni
+#define s_rsue numpy_lapack_lite_s_rsue
+#define s_stop numpy_lapack_lite_s_stop
+#define s_wdfe numpy_lapack_lite_s_wdfe
+#define s_wdue numpy_lapack_lite_s_wdue
+#define s_wsfe numpy_lapack_lite_s_wsfe
+#define s_wsfi numpy_lapack_lite_s_wsfi
+#define s_wsle numpy_lapack_lite_s_wsle
+#define s_wsli numpy_lapack_lite_s_wsli
+#define s_wsne numpy_lapack_lite_s_wsne
+#define s_wsni numpy_lapack_lite_s_wsni
+#define s_wsue numpy_lapack_lite_s_wsue
+#define sig_die numpy_lapack_lite_sig_die
+#define signal_ numpy_lapack_lite_signal_
+#define system_ numpy_lapack_lite_system_
+#define z_abs numpy_lapack_lite_z_abs
+#define z_cos numpy_lapack_lite_z_cos
+#define z_div numpy_lapack_lite_z_div
+#define z_exp numpy_lapack_lite_z_exp
+#define z_log numpy_lapack_lite_z_log
+#define z_sin numpy_lapack_lite_z_sin
+#define z_sqrt numpy_lapack_lite_z_sqrt
diff --git a/numpy/linalg/lapack_lite/make_lite.py b/numpy/linalg/lapack_lite/make_lite.py
index 61102d6ab..23921acf4 100755
--- a/numpy/linalg/lapack_lite/make_lite.py
+++ b/numpy/linalg/lapack_lite/make_lite.py
@@ -1,6 +1,6 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
"""
-Usage: make_lite.py <wrapped_routines_file> <lapack_dir> <output_dir>
+Usage: make_lite.py <wrapped_routines_file> <lapack_dir>
Typical invocation:
@@ -11,22 +11,16 @@ Requires the following to be on the path:
* patch
"""
-from __future__ import division, absolute_import, print_function
-
import sys
import os
+import re
import subprocess
import shutil
import fortran
import clapack_scrub
-PY2 = sys.version_info < (3, 0)
-
-if PY2:
- from distutils.spawn import find_executable as which
-else:
- from shutil import which
+from shutil import which
# Arguments to pass to f2c. You'll always want -A for ANSI C prototypes
# Others of interest: -a to not make variables static by default
@@ -35,11 +29,14 @@ F2C_ARGS = ['-A', '-Nx800']
# The header to add to the top of the f2c_*.c file. Note that dlamch_() calls
# will be replaced by the macros below by clapack_scrub.scrub_source()
-HEADER = '''\
+HEADER_BLURB = '''\
/*
-NOTE: This is generated code. Look in Misc/lapack_lite for information on
- remaking this file.
-*/
+ * NOTE: This is generated code. Look in numpy/linalg/lapack_lite for
+ * information on remaking this file.
+ */
+'''
+
+HEADER = HEADER_BLURB + '''\
#include "f2c.h"
#ifdef HAVE_CONFIG
@@ -65,7 +62,7 @@ them.
#endif
'''
-class FortranRoutine(object):
+class FortranRoutine:
"""Wrapper for a Fortran routine in a file.
"""
type = 'generic'
@@ -97,7 +94,7 @@ class UnknownFortranRoutine(FortranRoutine):
def dependencies(self):
return []
-class FortranLibrary(object):
+class FortranLibrary:
"""Container for a bunch of Fortran routines.
"""
def __init__(self, src_dirs):
@@ -281,6 +278,52 @@ def ensure_executable(name):
except:
raise SystemExit(name + ' not found')
+def create_name_header(output_dir):
+ routine_re = re.compile(r'^ (subroutine|.* function)\s+(\w+)\(.*$',
+ re.I)
+ extern_re = re.compile(r'^extern [a-z]+ ([a-z0-9_]+)\(.*$')
+
+ # BLAS/LAPACK symbols
+ symbols = set(['xerbla'])
+ for fn in os.listdir(output_dir):
+ fn = os.path.join(output_dir, fn)
+
+ if not fn.endswith('.f'):
+ continue
+
+ with open(fn, 'r') as f:
+ for line in f:
+ m = routine_re.match(line)
+ if m:
+ symbols.add(m.group(2).lower())
+
+ # f2c symbols
+ f2c_symbols = set()
+ with open('f2c.h', 'r') as f:
+ for line in f:
+ m = extern_re.match(line)
+ if m:
+ f2c_symbols.add(m.group(1))
+
+ with open(os.path.join(output_dir, 'lapack_lite_names.h'), 'w') as f:
+ f.write(HEADER_BLURB)
+ f.write(
+ "/*\n"
+ " * This file renames all BLAS/LAPACK and f2c symbols to avoid\n"
+ " * dynamic symbol name conflicts, in cases where e.g.\n"
+ " * integer sizes do not match with 'standard' ABI.\n"
+ " */\n")
+
+ # Rename BLAS/LAPACK symbols
+ for name in sorted(symbols):
+ f.write("#define %s_ BLAS_FUNC(%s)\n" % (name, name))
+
+ # Rename also symbols that f2c exports itself
+ f.write("\n"
+ "/* Symbols exported by f2c.c */\n")
+ for name in sorted(f2c_symbols):
+ f.write("#define %s numpy_lapack_lite_%s\n" % (name, name))
+
def main():
if len(sys.argv) != 3:
print(__doc__)
@@ -330,12 +373,14 @@ def main():
print()
+ create_name_header(output_dir)
+
for fname in os.listdir(output_dir):
- if fname.endswith('.c'):
+ if fname.endswith('.c') or fname == 'lapack_lite_names.h':
print('Copying ' + fname)
shutil.copy(
os.path.join(output_dir, fname),
- os.path.dirname(__file__),
+ os.path.abspath(os.path.dirname(__file__)),
)
diff --git a/numpy/linalg/lapack_lite/python_xerbla.c b/numpy/linalg/lapack_lite/python_xerbla.c
index dfc195556..fe2f718b2 100644
--- a/numpy/linalg/lapack_lite/python_xerbla.c
+++ b/numpy/linalg/lapack_lite/python_xerbla.c
@@ -1,7 +1,6 @@
#include "Python.h"
-
-#undef c_abs
-#include "f2c.h"
+#include "numpy/npy_common.h"
+#include "npy_cblas.h"
/*
From the original manpage:
@@ -20,7 +19,7 @@
info: Number of the invalid parameter.
*/
-int xerbla_(char *srname, integer *info)
+CBLAS_INT BLAS_FUNC(xerbla)(char *srname, CBLAS_INT *info)
{
static const char format[] = "On entry to %.*s" \
" parameter number %d had an illegal value";
@@ -38,7 +37,7 @@ int xerbla_(char *srname, integer *info)
#ifdef WITH_THREAD
save = PyGILState_Ensure();
#endif
- PyOS_snprintf(buf, sizeof(buf), format, len, srname, *info);
+ PyOS_snprintf(buf, sizeof(buf), format, len, srname, (int)*info);
PyErr_SetString(PyExc_ValueError, buf);
#ifdef WITH_THREAD
PyGILState_Release(save);
diff --git a/numpy/linalg/lapack_litemodule.c b/numpy/linalg/lapack_litemodule.c
index 696a6d874..362a593a6 100644
--- a/numpy/linalg/lapack_litemodule.c
+++ b/numpy/linalg/lapack_litemodule.c
@@ -6,46 +6,66 @@ More modifications by Jeff Whitaker
#include "Python.h"
#include "numpy/arrayobject.h"
+#include "npy_cblas.h"
+
+
+#define FNAME(name) BLAS_FUNC(name)
+
+typedef CBLAS_INT fortran_int;
+
+#ifdef HAVE_BLAS_ILP64
+
+#if NPY_BITSOF_SHORT == 64
+#define FINT_PYFMT "h"
+#elif NPY_BITSOF_INT == 64
+#define FINT_PYFMT "i"
+#elif NPY_BITSOF_LONG == 64
+#define FINT_PYFMT "l"
+#elif NPY_BITSOF_LONGLONG == 64
+#define FINT_PYFMT "L"
+#else
+#error No compatible 64-bit integer size. \
+ Please contact NumPy maintainers and give detailed information about your \
+ compiler and platform, or set NPY_USE_BLAS64_=0
+#endif
-#ifdef NO_APPEND_FORTRAN
-# define FNAME(x) x
#else
-# define FNAME(x) x##_
+#define FINT_PYFMT "i"
#endif
typedef struct { float r, i; } f2c_complex;
typedef struct { double r, i; } f2c_doublecomplex;
/* typedef long int (*L_fp)(); */
-extern int FNAME(dgelsd)(int *m, int *n, int *nrhs,
- double a[], int *lda, double b[], int *ldb,
- double s[], double *rcond, int *rank,
- double work[], int *lwork, int iwork[], int *info);
+extern fortran_int FNAME(dgelsd)(fortran_int *m, fortran_int *n, fortran_int *nrhs,
+ double a[], fortran_int *lda, double b[], fortran_int *ldb,
+ double s[], double *rcond, fortran_int *rank,
+ double work[], fortran_int *lwork, fortran_int iwork[], fortran_int *info);
-extern int FNAME(zgelsd)(int *m, int *n, int *nrhs,
- f2c_doublecomplex a[], int *lda,
- f2c_doublecomplex b[], int *ldb,
- double s[], double *rcond, int *rank,
- f2c_doublecomplex work[], int *lwork,
- double rwork[], int iwork[], int *info);
+extern fortran_int FNAME(zgelsd)(fortran_int *m, fortran_int *n, fortran_int *nrhs,
+ f2c_doublecomplex a[], fortran_int *lda,
+ f2c_doublecomplex b[], fortran_int *ldb,
+ double s[], double *rcond, fortran_int *rank,
+ f2c_doublecomplex work[], fortran_int *lwork,
+ double rwork[], fortran_int iwork[], fortran_int *info);
-extern int FNAME(dgeqrf)(int *m, int *n, double a[], int *lda,
+extern fortran_int FNAME(dgeqrf)(fortran_int *m, fortran_int *n, double a[], fortran_int *lda,
double tau[], double work[],
- int *lwork, int *info);
+ fortran_int *lwork, fortran_int *info);
-extern int FNAME(zgeqrf)(int *m, int *n, f2c_doublecomplex a[], int *lda,
+extern fortran_int FNAME(zgeqrf)(fortran_int *m, fortran_int *n, f2c_doublecomplex a[], fortran_int *lda,
f2c_doublecomplex tau[], f2c_doublecomplex work[],
- int *lwork, int *info);
+ fortran_int *lwork, fortran_int *info);
-extern int FNAME(dorgqr)(int *m, int *n, int *k, double a[], int *lda,
+extern fortran_int FNAME(dorgqr)(fortran_int *m, fortran_int *n, fortran_int *k, double a[], fortran_int *lda,
double tau[], double work[],
- int *lwork, int *info);
+ fortran_int *lwork, fortran_int *info);
-extern int FNAME(zungqr)(int *m, int *n, int *k, f2c_doublecomplex a[],
- int *lda, f2c_doublecomplex tau[],
- f2c_doublecomplex work[], int *lwork, int *info);
+extern fortran_int FNAME(zungqr)(fortran_int *m, fortran_int *n, fortran_int *k, f2c_doublecomplex a[],
+ fortran_int *lda, f2c_doublecomplex tau[],
+ f2c_doublecomplex work[], fortran_int *lwork, fortran_int *info);
-extern int FNAME(xerbla)(char *srname, int *info);
+extern fortran_int FNAME(xerbla)(char *srname, fortran_int *info);
static PyObject *LapackError;
@@ -90,27 +110,31 @@ check_object(PyObject *ob, int t, char *obname,
#define FDATA(p) ((float *) PyArray_DATA((PyArrayObject *)p))
#define CDATA(p) ((f2c_complex *) PyArray_DATA((PyArrayObject *)p))
#define ZDATA(p) ((f2c_doublecomplex *) PyArray_DATA((PyArrayObject *)p))
-#define IDATA(p) ((int *) PyArray_DATA((PyArrayObject *)p))
+#define IDATA(p) ((fortran_int *) PyArray_DATA((PyArrayObject *)p))
static PyObject *
lapack_lite_dgelsd(PyObject *NPY_UNUSED(self), PyObject *args)
{
- int lapack_lite_status;
- int m;
- int n;
- int nrhs;
+ fortran_int lapack_lite_status;
+ fortran_int m;
+ fortran_int n;
+ fortran_int nrhs;
PyObject *a;
- int lda;
+ fortran_int lda;
PyObject *b;
- int ldb;
+ fortran_int ldb;
PyObject *s;
double rcond;
- int rank;
+ fortran_int rank;
PyObject *work;
PyObject *iwork;
- int lwork;
- int info;
- TRY(PyArg_ParseTuple(args,"iiiOiOiOdiOiOi:dgelsd",
+ fortran_int lwork;
+ fortran_int info;
+
+ TRY(PyArg_ParseTuple(args,
+ (FINT_PYFMT FINT_PYFMT FINT_PYFMT "O" FINT_PYFMT "O"
+ FINT_PYFMT "O" "d" FINT_PYFMT "O" FINT_PYFMT "O"
+ FINT_PYFMT ":dgelsd"),
&m,&n,&nrhs,&a,&lda,&b,&ldb,&s,&rcond,
&rank,&work,&lwork,&iwork,&info));
@@ -118,7 +142,11 @@ lapack_lite_dgelsd(PyObject *NPY_UNUSED(self), PyObject *args)
TRY(check_object(b,NPY_DOUBLE,"b","NPY_DOUBLE","dgelsd"));
TRY(check_object(s,NPY_DOUBLE,"s","NPY_DOUBLE","dgelsd"));
TRY(check_object(work,NPY_DOUBLE,"work","NPY_DOUBLE","dgelsd"));
+#ifndef NPY_UMATH_USE_BLAS64_
TRY(check_object(iwork,NPY_INT,"iwork","NPY_INT","dgelsd"));
+#else
+ TRY(check_object(iwork,NPY_INT64,"iwork","NPY_INT64","dgelsd"));
+#endif
lapack_lite_status =
FNAME(dgelsd)(&m,&n,&nrhs,DDATA(a),&lda,DDATA(b),&ldb,
@@ -128,8 +156,11 @@ lapack_lite_dgelsd(PyObject *NPY_UNUSED(self), PyObject *args)
return NULL;
}
- return Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:i,s:d,s:i,s:i,s:i}","dgelsd_",
- lapack_lite_status,"m",m,"n",n,"nrhs",nrhs,
+ return Py_BuildValue(("{s:" FINT_PYFMT ",s:" FINT_PYFMT ",s:" FINT_PYFMT
+ ",s:" FINT_PYFMT ",s:" FINT_PYFMT ",s:" FINT_PYFMT
+ ",s:d,s:" FINT_PYFMT ",s:" FINT_PYFMT
+ ",s:" FINT_PYFMT "}"),
+ "dgelsd_",lapack_lite_status,"m",m,"n",n,"nrhs",nrhs,
"lda",lda,"ldb",ldb,"rcond",rcond,"rank",rank,
"lwork",lwork,"info",info);
}
@@ -137,13 +168,16 @@ lapack_lite_dgelsd(PyObject *NPY_UNUSED(self), PyObject *args)
static PyObject *
lapack_lite_dgeqrf(PyObject *NPY_UNUSED(self), PyObject *args)
{
- int lapack_lite_status;
- int m, n, lwork;
+ fortran_int lapack_lite_status;
+ fortran_int m, n, lwork;
PyObject *a, *tau, *work;
- int lda;
- int info;
+ fortran_int lda;
+ fortran_int info;
- TRY(PyArg_ParseTuple(args,"iiOiOOii:dgeqrf",&m,&n,&a,&lda,&tau,&work,&lwork,&info));
+ TRY(PyArg_ParseTuple(args,
+ (FINT_PYFMT FINT_PYFMT "O" FINT_PYFMT "OO"
+ FINT_PYFMT FINT_PYFMT ":dgeqrf"),
+ &m,&n,&a,&lda,&tau,&work,&lwork,&info));
/* check objects and convert to right storage order */
TRY(check_object(a,NPY_DOUBLE,"a","NPY_DOUBLE","dgeqrf"));
@@ -157,7 +191,9 @@ lapack_lite_dgeqrf(PyObject *NPY_UNUSED(self), PyObject *args)
return NULL;
}
- return Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:i}","dgeqrf_",
+ return Py_BuildValue(("{s:" FINT_PYFMT ",s:" FINT_PYFMT ",s:" FINT_PYFMT
+ ",s:" FINT_PYFMT ",s:" FINT_PYFMT ",s:" FINT_PYFMT "}"),
+ "dgeqrf_",
lapack_lite_status,"m",m,"n",n,"lda",lda,
"lwork",lwork,"info",info);
}
@@ -166,13 +202,17 @@ lapack_lite_dgeqrf(PyObject *NPY_UNUSED(self), PyObject *args)
static PyObject *
lapack_lite_dorgqr(PyObject *NPY_UNUSED(self), PyObject *args)
{
- int lapack_lite_status;
- int m, n, k, lwork;
+ fortran_int lapack_lite_status;
+ fortran_int m, n, k, lwork;
PyObject *a, *tau, *work;
- int lda;
- int info;
-
- TRY(PyArg_ParseTuple(args,"iiiOiOOii:dorgqr", &m, &n, &k, &a, &lda, &tau, &work, &lwork, &info));
+ fortran_int lda;
+ fortran_int info;
+
+ TRY(PyArg_ParseTuple(args,
+ (FINT_PYFMT FINT_PYFMT FINT_PYFMT "O"
+ FINT_PYFMT "OO" FINT_PYFMT FINT_PYFMT
+ ":dorgqr"),
+ &m, &n, &k, &a, &lda, &tau, &work, &lwork, &info));
TRY(check_object(a,NPY_DOUBLE,"a","NPY_DOUBLE","dorgqr"));
TRY(check_object(tau,NPY_DOUBLE,"tau","NPY_DOUBLE","dorgqr"));
TRY(check_object(work,NPY_DOUBLE,"work","NPY_DOUBLE","dorgqr"));
@@ -191,23 +231,26 @@ lapack_lite_dorgqr(PyObject *NPY_UNUSED(self), PyObject *args)
static PyObject *
lapack_lite_zgelsd(PyObject *NPY_UNUSED(self), PyObject *args)
{
- int lapack_lite_status;
- int m;
- int n;
- int nrhs;
+ fortran_int lapack_lite_status;
+ fortran_int m;
+ fortran_int n;
+ fortran_int nrhs;
PyObject *a;
- int lda;
+ fortran_int lda;
PyObject *b;
- int ldb;
+ fortran_int ldb;
PyObject *s;
double rcond;
- int rank;
+ fortran_int rank;
PyObject *work;
- int lwork;
+ fortran_int lwork;
PyObject *rwork;
PyObject *iwork;
- int info;
- TRY(PyArg_ParseTuple(args,"iiiOiOiOdiOiOOi:zgelsd",
+ fortran_int info;
+ TRY(PyArg_ParseTuple(args,
+ (FINT_PYFMT FINT_PYFMT FINT_PYFMT "O" FINT_PYFMT
+ "O" FINT_PYFMT "Od" FINT_PYFMT "O" FINT_PYFMT
+ "OO" FINT_PYFMT ":zgelsd"),
&m,&n,&nrhs,&a,&lda,&b,&ldb,&s,&rcond,
&rank,&work,&lwork,&rwork,&iwork,&info));
@@ -216,7 +259,11 @@ lapack_lite_zgelsd(PyObject *NPY_UNUSED(self), PyObject *args)
TRY(check_object(s,NPY_DOUBLE,"s","NPY_DOUBLE","zgelsd"));
TRY(check_object(work,NPY_CDOUBLE,"work","NPY_CDOUBLE","zgelsd"));
TRY(check_object(rwork,NPY_DOUBLE,"rwork","NPY_DOUBLE","zgelsd"));
+#ifndef NPY_UMATH_USE_BLAS64_
TRY(check_object(iwork,NPY_INT,"iwork","NPY_INT","zgelsd"));
+#else
+ TRY(check_object(iwork,NPY_INT64,"iwork","NPY_INT64","zgelsd"));
+#endif
lapack_lite_status =
FNAME(zgelsd)(&m,&n,&nrhs,ZDATA(a),&lda,ZDATA(b),&ldb,DDATA(s),&rcond,
@@ -225,7 +272,11 @@ lapack_lite_zgelsd(PyObject *NPY_UNUSED(self), PyObject *args)
return NULL;
}
- return Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i}","zgelsd_",
+ return Py_BuildValue(("{s:" FINT_PYFMT ",s:" FINT_PYFMT ",s:" FINT_PYFMT
+ ",s:" FINT_PYFMT ",s:" FINT_PYFMT ",s:" FINT_PYFMT
+ ",s:" FINT_PYFMT ",s:" FINT_PYFMT ",s:" FINT_PYFMT
+ "}"),
+ "zgelsd_",
lapack_lite_status,"m",m,"n",n,"nrhs",nrhs,"lda",lda,
"ldb",ldb,"rank",rank,"lwork",lwork,"info",info);
}
@@ -233,13 +284,16 @@ lapack_lite_zgelsd(PyObject *NPY_UNUSED(self), PyObject *args)
static PyObject *
lapack_lite_zgeqrf(PyObject *NPY_UNUSED(self), PyObject *args)
{
- int lapack_lite_status;
- int m, n, lwork;
+ fortran_int lapack_lite_status;
+ fortran_int m, n, lwork;
PyObject *a, *tau, *work;
- int lda;
- int info;
+ fortran_int lda;
+ fortran_int info;
- TRY(PyArg_ParseTuple(args,"iiOiOOii:zgeqrf",&m,&n,&a,&lda,&tau,&work,&lwork,&info));
+ TRY(PyArg_ParseTuple(args,
+ (FINT_PYFMT FINT_PYFMT "O" FINT_PYFMT "OO"
+ FINT_PYFMT "" FINT_PYFMT ":zgeqrf"),
+ &m,&n,&a,&lda,&tau,&work,&lwork,&info));
/* check objects and convert to right storage order */
TRY(check_object(a,NPY_CDOUBLE,"a","NPY_CDOUBLE","zgeqrf"));
@@ -253,20 +307,27 @@ lapack_lite_zgeqrf(PyObject *NPY_UNUSED(self), PyObject *args)
return NULL;
}
- return Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:i}","zgeqrf_",lapack_lite_status,"m",m,"n",n,"lda",lda,"lwork",lwork,"info",info);
+ return Py_BuildValue(("{s:" FINT_PYFMT ",s:" FINT_PYFMT
+ ",s:" FINT_PYFMT ",s:" FINT_PYFMT
+ ",s:" FINT_PYFMT ",s:" FINT_PYFMT "}"),
+ "zgeqrf_",lapack_lite_status,"m",m,"n",n,"lda",lda,"lwork",lwork,"info",info);
}
static PyObject *
lapack_lite_zungqr(PyObject *NPY_UNUSED(self), PyObject *args)
{
- int lapack_lite_status;
- int m, n, k, lwork;
+ fortran_int lapack_lite_status;
+ fortran_int m, n, k, lwork;
PyObject *a, *tau, *work;
- int lda;
- int info;
-
- TRY(PyArg_ParseTuple(args,"iiiOiOOii:zungqr", &m, &n, &k, &a, &lda, &tau, &work, &lwork, &info));
+ fortran_int lda;
+ fortran_int info;
+
+ TRY(PyArg_ParseTuple(args,
+ (FINT_PYFMT FINT_PYFMT FINT_PYFMT "O"
+ FINT_PYFMT "OO" FINT_PYFMT "" FINT_PYFMT
+ ":zungqr"),
+ &m, &n, &k, &a, &lda, &tau, &work, &lwork, &info));
TRY(check_object(a,NPY_CDOUBLE,"a","NPY_CDOUBLE","zungqr"));
TRY(check_object(tau,NPY_CDOUBLE,"tau","NPY_CDOUBLE","zungqr"));
TRY(check_object(work,NPY_CDOUBLE,"work","NPY_CDOUBLE","zungqr"));
@@ -279,7 +340,8 @@ lapack_lite_zungqr(PyObject *NPY_UNUSED(self), PyObject *args)
return NULL;
}
- return Py_BuildValue("{s:i,s:i}","zungqr_",lapack_lite_status,
+ return Py_BuildValue(("{s:" FINT_PYFMT ",s:" FINT_PYFMT "}"),
+ "zungqr_",lapack_lite_status,
"info",info);
}
@@ -287,7 +349,7 @@ lapack_lite_zungqr(PyObject *NPY_UNUSED(self), PyObject *args)
static PyObject *
lapack_lite_xerbla(PyObject *NPY_UNUSED(self), PyObject *args)
{
- int info = -1;
+ fortran_int info = -1;
NPY_BEGIN_THREADS_DEF;
NPY_BEGIN_THREADS;
@@ -315,7 +377,6 @@ static struct PyMethodDef lapack_lite_module_methods[] = {
};
-#if PY_MAJOR_VERSION >= 3
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
"lapack_lite",
@@ -327,32 +388,25 @@ static struct PyModuleDef moduledef = {
NULL,
NULL
};
-#endif
/* Initialization function for the module */
-#if PY_MAJOR_VERSION >= 3
-#define RETVAL(x) x
PyMODINIT_FUNC PyInit_lapack_lite(void)
-#else
-#define RETVAL(x)
-PyMODINIT_FUNC
-initlapack_lite(void)
-#endif
{
PyObject *m,*d;
-#if PY_MAJOR_VERSION >= 3
m = PyModule_Create(&moduledef);
-#else
- m = Py_InitModule4("lapack_lite", lapack_lite_module_methods,
- "", (PyObject*)NULL,PYTHON_API_VERSION);
-#endif
if (m == NULL) {
- return RETVAL(NULL);
+ return NULL;
}
import_array();
d = PyModule_GetDict(m);
LapackError = PyErr_NewException("lapack_lite.LapackError", NULL, NULL);
PyDict_SetItemString(d, "LapackError", LapackError);
- return RETVAL(m);
+#ifdef HAVE_BLAS_ILP64
+ PyDict_SetItemString(d, "_ilp64", Py_True);
+#else
+ PyDict_SetItemString(d, "_ilp64", Py_False);
+#endif
+
+ return m;
}
diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py
index 816a200eb..eac6267c6 100644
--- a/numpy/linalg/linalg.py
+++ b/numpy/linalg/linalg.py
@@ -8,8 +8,6 @@ version only accesses the following LAPACK functions: dgesv, zgesv,
dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
"""
-from __future__ import division, absolute_import, print_function
-
__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
@@ -26,7 +24,7 @@ from numpy.core import (
add, multiply, sqrt, fastCopyAndTranspose, sum, isfinite,
finfo, errstate, geterrobj, moveaxis, amin, amax, product, abs,
atleast_2d, intp, asanyarray, object_, matmul,
- swapaxes, divide, count_nonzero, isnan, sign
+ swapaxes, divide, count_nonzero, isnan, sign, argsort, sort
)
from numpy.core.multiarray import normalize_axis_index
from numpy.core.overrides import set_module
@@ -39,13 +37,6 @@ array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy.linalg')
-# For Python2/3 compatibility
-_N = b'N'
-_V = b'V'
-_A = b'A'
-_S = b'S'
-_L = b'L'
-
fortran_int = intc
@@ -194,37 +185,33 @@ def _fastCopyAndTranspose(type, *arrays):
else:
return cast_arrays
-def _assertRank2(*arrays):
+def _assert_2d(*arrays):
for a in arrays:
if a.ndim != 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'two-dimensional' % a.ndim)
-def _assertRankAtLeast2(*arrays):
+def _assert_stacked_2d(*arrays):
for a in arrays:
if a.ndim < 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'at least two-dimensional' % a.ndim)
-def _assertNdSquareness(*arrays):
+def _assert_stacked_square(*arrays):
for a in arrays:
m, n = a.shape[-2:]
if m != n:
raise LinAlgError('Last 2 dimensions of the array must be square')
-def _assertFinite(*arrays):
+def _assert_finite(*arrays):
for a in arrays:
- if not (isfinite(a).all()):
+ if not isfinite(a).all():
raise LinAlgError("Array must not contain infs or NaNs")
-def _isEmpty2d(arr):
+def _is_empty_2d(arr):
# check size first for efficiency
return arr.size == 0 and product(arr.shape[-2:]) == 0
-def _assertNoEmpty2d(*arrays):
- for a in arrays:
- if _isEmpty2d(a):
- raise LinAlgError("Arrays cannot be empty")
def transpose(a):
"""
@@ -349,6 +336,10 @@ def solve(a, b):
LinAlgError
If `a` is singular or not square.
+ See Also
+ --------
+ scipy.linalg.solve : Similar function in SciPy.
+
Notes
-----
@@ -386,8 +377,8 @@ def solve(a, b):
"""
a, _ = _makearray(a)
- _assertRankAtLeast2(a)
- _assertNdSquareness(a)
+ _assert_stacked_2d(a)
+ _assert_stacked_square(a)
b, wrap = _makearray(b)
t, result_t = _commonType(a, b)
@@ -506,6 +497,10 @@ def inv(a):
LinAlgError
If `a` is not square or inversion fails.
+ See Also
+ --------
+ scipy.linalg.inv : Similar function in SciPy.
+
Notes
-----
@@ -542,8 +537,8 @@ def inv(a):
"""
a, wrap = _makearray(a)
- _assertRankAtLeast2(a)
- _assertNdSquareness(a)
+ _assert_stacked_2d(a)
+ _assert_stacked_square(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
@@ -622,8 +617,8 @@ def matrix_power(a, n):
"""
a = asanyarray(a)
- _assertRankAtLeast2(a)
- _assertNdSquareness(a)
+ _assert_stacked_2d(a)
+ _assert_stacked_square(a)
try:
n = operator.index(n)
@@ -683,8 +678,10 @@ def cholesky(a):
Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,
where `L` is lower-triangular and .H is the conjugate transpose operator
(which is the ordinary transpose if `a` is real-valued). `a` must be
- Hermitian (symmetric if real-valued) and positive-definite. Only `L` is
- actually returned.
+ Hermitian (symmetric if real-valued) and positive-definite. No
+ checking is performed to verify whether `a` is Hermitian or not.
+ In addition, only the lower-triangular and diagonal elements of `a`
+ are used. Only `L` is actually returned.
Parameters
----------
@@ -704,6 +701,14 @@ def cholesky(a):
If the decomposition fails, for example, if `a` is not
positive-definite.
+ See Also
+ --------
+ scipy.linalg.cholesky : Similar function in SciPy.
+ scipy.linalg.cholesky_banded : Cholesky decompose a banded Hermitian
+ positive-definite matrix.
+ scipy.linalg.cho_factor : Cholesky decomposition of a matrix, to use in
+ `scipy.linalg.cho_solve`.
+
Notes
-----
@@ -752,15 +757,15 @@ def cholesky(a):
extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef)
gufunc = _umath_linalg.cholesky_lo
a, wrap = _makearray(a)
- _assertRankAtLeast2(a)
- _assertNdSquareness(a)
+ _assert_stacked_2d(a)
+ _assert_stacked_square(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = gufunc(a, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
-# QR decompostion
+# QR decomposition
def _qr_dispatcher(a, mode=None):
return (a,)
@@ -816,6 +821,11 @@ def qr(a, mode='reduced'):
LinAlgError
If factoring fails.
+ See Also
+ --------
+ scipy.linalg.qr : Similar function in SciPy.
+ scipy.linalg.rq : Compute RQ decomposition of a matrix.
+
Notes
-----
This is an interface to the LAPACK routines ``dgeqrf``, ``zgeqrf``,
@@ -895,7 +905,7 @@ def qr(a, mode='reduced'):
raise ValueError("Unrecognized mode '%s'" % mode)
a, wrap = _makearray(a)
- _assertRank2(a)
+ _assert_2d(a)
m, n = a.shape
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
@@ -1008,6 +1018,7 @@ def eigvals(a):
(conjugate symmetric) arrays.
eigh : eigenvalues and eigenvectors of real symmetric or complex
Hermitian (conjugate symmetric) arrays.
+ scipy.linalg.eigvals : Similar function in SciPy.
Notes
-----
@@ -1047,9 +1058,9 @@ def eigvals(a):
"""
a, wrap = _makearray(a)
- _assertRankAtLeast2(a)
- _assertNdSquareness(a)
- _assertFinite(a)
+ _assert_stacked_2d(a)
+ _assert_stacked_square(a)
+ _assert_finite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
@@ -1109,6 +1120,7 @@ def eigvalsh(a, UPLO='L'):
eigvals : eigenvalues of general real or complex arrays.
eig : eigenvalues and right eigenvectors of general real or complex
arrays.
+ scipy.linalg.eigvalsh : Similar function in SciPy.
Notes
-----
@@ -1157,8 +1169,8 @@ def eigvalsh(a, UPLO='L'):
gufunc = _umath_linalg.eigvalsh_up
a, wrap = _makearray(a)
- _assertRankAtLeast2(a)
- _assertNdSquareness(a)
+ _assert_stacked_2d(a)
+ _assert_stacked_square(a)
t, result_t = _commonType(a)
signature = 'D->d' if isComplexType(t) else 'd->d'
w = gufunc(a, signature=signature, extobj=extobj)
@@ -1207,12 +1219,14 @@ def eig(a):
See Also
--------
eigvals : eigenvalues of a non-symmetric array.
-
eigh : eigenvalues and eigenvectors of a real symmetric or complex
Hermitian (conjugate symmetric) array.
-
eigvalsh : eigenvalues of a real symmetric or complex Hermitian
(conjugate symmetric) array.
+ scipy.linalg.eig : Similar function in SciPy that also solves the
+ generalized eigenvalue problem.
+ scipy.linalg.schur : Best choice for unitary and other non-Hermitian
+ normal matrices.
Notes
-----
@@ -1226,21 +1240,26 @@ def eig(a):
the eigenvalues and eigenvectors of general square arrays.
The number `w` is an eigenvalue of `a` if there exists a vector
- `v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and
- `v` satisfy the equations ``dot(a[:,:], v[:,i]) = w[i] * v[:,i]``
+ `v` such that ``a @ v = w * v``. Thus, the arrays `a`, `w`, and
+ `v` satisfy the equations ``a @ v[:,i] = w[i] * v[:,i]``
for :math:`i \\in \\{0,...,M-1\\}`.
The array `v` of eigenvectors may not be of maximum rank, that is, some
of the columns may be linearly dependent, although round-off error may
obscure that fact. If the eigenvalues are all different, then theoretically
- the eigenvectors are linearly independent. Likewise, the (complex-valued)
- matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e.,
- if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate
- transpose of `a`.
+ the eigenvectors are linearly independent and `a` can be diagonalized by
+ a similarity transformation using `v`, i.e, ``inv(v) @ a @ v`` is diagonal.
+
+ For non-Hermitian normal matrices the SciPy function `scipy.linalg.schur`
+ is preferred because the matrix `v` is guaranteed to be unitary, which is
+ not the case when using `eig`. The Schur factorization produces an
+ upper triangular matrix rather than a diagonal matrix, but for normal
+ matrices only the diagonal of the upper triangular matrix is needed, the
+ rest is roundoff error.
Finally, it is emphasized that `v` consists of the *right* (as in
right-hand side) eigenvectors of `a`. A vector `y` satisfying
- ``dot(y.T, a) = z * y.T`` for some number `z` is called a *left*
+ ``y.T @ a = z * y.T`` for some number `z` is called a *left*
eigenvector of `a`, and, in general, the left and right eigenvectors
of a matrix are not necessarily the (perhaps conjugate) transposes
of each other.
@@ -1294,9 +1313,9 @@ def eig(a):
"""
a, wrap = _makearray(a)
- _assertRankAtLeast2(a)
- _assertNdSquareness(a)
- _assertFinite(a)
+ _assert_stacked_2d(a)
+ _assert_stacked_square(a)
+ _assert_finite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
@@ -1359,6 +1378,8 @@ def eigh(a, UPLO='L'):
(conjugate symmetric) arrays.
eig : eigenvalues and right eigenvectors for non-symmetric arrays.
eigvals : eigenvalues of non-symmetric arrays.
+ scipy.linalg.eigh : Similar function in SciPy (but also solves the
+ generalized eigenvalue problem).
Notes
-----
@@ -1435,8 +1456,8 @@ def eigh(a, UPLO='L'):
raise ValueError("UPLO argument must be 'L' or 'U'")
a, wrap = _makearray(a)
- _assertRankAtLeast2(a)
- _assertNdSquareness(a)
+ _assert_stacked_2d(a)
+ _assert_stacked_square(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
@@ -1510,6 +1531,11 @@ def svd(a, full_matrices=True, compute_uv=True, hermitian=False):
LinAlgError
If SVD computation does not converge.
+ See Also
+ --------
+ scipy.linalg.svd : Similar function in SciPy.
+ scipy.linalg.svdvals : Compute singular values of a matrix.
+
Notes
-----
@@ -1589,26 +1615,31 @@ def svd(a, full_matrices=True, compute_uv=True, hermitian=False):
True
"""
+ import numpy as _nx
a, wrap = _makearray(a)
if hermitian:
- # note: lapack returns eigenvalues in reverse order to our contract.
- # reversing is cheap by design in numpy, so we do so to be consistent
+ # note: lapack svd returns eigenvalues with s ** 2 sorted descending,
+ # but eig returns s sorted ascending, so we re-order the eigenvalues
+ # and related arrays to have the correct order
if compute_uv:
s, u = eigh(a)
- s = s[..., ::-1]
- u = u[..., ::-1]
- # singular values are unsigned, move the sign into v
- vt = transpose(u * sign(s)[..., None, :]).conjugate()
+ sgn = sign(s)
s = abs(s)
+ sidx = argsort(s)[..., ::-1]
+ sgn = _nx.take_along_axis(sgn, sidx, axis=-1)
+ s = _nx.take_along_axis(s, sidx, axis=-1)
+ u = _nx.take_along_axis(u, sidx[..., None, :], axis=-1)
+ # singular values are unsigned, move the sign into v
+ vt = transpose(u * sgn[..., None, :]).conjugate()
return wrap(u), s, wrap(vt)
else:
s = eigvalsh(a)
s = s[..., ::-1]
s = abs(s)
- return s
+ return sort(s)[..., ::-1]
- _assertRankAtLeast2(a)
+ _assert_stacked_2d(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)
@@ -1729,7 +1760,8 @@ def cond(x, p=None):
"""
x = asarray(x) # in case we have a matrix
- _assertNoEmpty2d(x)
+ if _is_empty_2d(x):
+ raise LinAlgError("cond is not defined on empty arrays")
if p is None or p == 2 or p == -2:
s = svd(x, compute_uv=False)
with errstate(all='ignore'):
@@ -1740,8 +1772,8 @@ def cond(x, p=None):
else:
# Call inv(x) ignoring errors. The result array will
# contain nans in the entries where inversion failed.
- _assertRankAtLeast2(x)
- _assertNdSquareness(x)
+ _assert_stacked_2d(x)
+ _assert_stacked_square(x)
t, result_t = _commonType(x)
signature = 'D->D' if isComplexType(t) else 'd->d'
with errstate(all='ignore'):
@@ -1920,6 +1952,13 @@ def pinv(a, rcond=1e-15, hermitian=False):
LinAlgError
If the SVD computation does not converge.
+ See Also
+ --------
+ scipy.linalg.pinv : Similar function in SciPy.
+ scipy.linalg.pinv2 : Similar function in SciPy (SVD-based).
+ scipy.linalg.pinvh : Compute the (Moore-Penrose) pseudo-inverse of a
+ Hermitian matrix.
+
Notes
-----
The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
@@ -1956,7 +1995,7 @@ def pinv(a, rcond=1e-15, hermitian=False):
"""
a, wrap = _makearray(a)
rcond = asarray(rcond)
- if _isEmpty2d(a):
+ if _is_empty_2d(a):
m, n = a.shape[-2:]
res = empty(a.shape[:-2] + (n, m), dtype=a.dtype)
return wrap(res)
@@ -2052,8 +2091,8 @@ def slogdet(a):
"""
a = asarray(a)
- _assertRankAtLeast2(a)
- _assertNdSquareness(a)
+ _assert_stacked_2d(a)
+ _assert_stacked_square(a)
t, result_t = _commonType(a)
real_t = _realType(result_t)
signature = 'D->Dd' if isComplexType(t) else 'd->dd'
@@ -2082,6 +2121,7 @@ def det(a):
--------
slogdet : Another way to represent the determinant, more suitable
for large matrices where underflow/overflow may occur.
+ scipy.linalg.det : Similar function in SciPy.
Notes
-----
@@ -2112,8 +2152,8 @@ def det(a):
"""
a = asarray(a)
- _assertRankAtLeast2(a)
- _assertNdSquareness(a)
+ _assert_stacked_2d(a)
+ _assert_stacked_square(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = _umath_linalg.det(a, signature=signature)
@@ -2132,13 +2172,13 @@ def lstsq(a, b, rcond="warn"):
r"""
Return the least-squares solution to a linear matrix equation.
- Solves the equation :math:`a x = b` by computing a vector `x` that
- minimizes the squared Euclidean 2-norm :math:`\| b - a x \|^2_2`.
- The equation may be under-, well-, or over-determined (i.e., the
- number of linearly independent rows of `a` can be less than, equal
- to, or greater than its number of linearly independent columns).
+ Computes the vector x that approximatively solves the equation
+ ``a @ x = b``. The equation may be under-, well-, or over-determined
+ (i.e., the number of linearly independent rows of `a` can be less than,
+ equal to, or greater than its number of linearly independent columns).
If `a` is square and of full rank, then `x` (but for round-off error)
- is the "exact" solution of the equation.
+ is the "exact" solution of the equation. Else, `x` minimizes the
+ Euclidean 2-norm :math:`|| b - a x ||`.
Parameters
----------
@@ -2182,6 +2222,10 @@ def lstsq(a, b, rcond="warn"):
LinAlgError
If computation does not converge.
+ See Also
+ --------
+ scipy.linalg.lstsq : Similar function in SciPy.
+
Notes
-----
If `b` is a matrix, then all array results are returned as matrices.
@@ -2224,7 +2268,7 @@ def lstsq(a, b, rcond="warn"):
is_1d = b.ndim == 1
if is_1d:
b = b[:, newaxis]
- _assertRank2(a, b)
+ _assert_2d(a, b)
m, n = a.shape[-2:]
m2, n_rhs = b.shape[-2:]
if m != m2:
@@ -2328,16 +2372,19 @@ def norm(x, ord=None, axis=None, keepdims=False):
Parameters
----------
x : array_like
- Input array. If `axis` is None, `x` must be 1-D or 2-D.
+ Input array. If `axis` is None, `x` must be 1-D or 2-D, unless `ord`
+ is None. If both `axis` and `ord` are None, the 2-norm of
+ ``x.ravel`` will be returned.
ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
- `inf` object.
- axis : {int, 2-tuple of ints, None}, optional
+ `inf` object. The default is None.
+ axis : {None, int, 2-tuple of ints}, optional.
If `axis` is an integer, it specifies the axis of `x` along which to
compute the vector norms. If `axis` is a 2-tuple, it specifies the
axes that hold 2-D matrices, and the matrix norms of these matrices
are computed. If `axis` is None then either a vector norm (when `x`
- is 1-D) or a matrix norm (when `x` is 2-D) is returned.
+ is 1-D) or a matrix norm (when `x` is 2-D) is returned. The default
+ is None.
.. versionadded:: 1.8.0
@@ -2353,9 +2400,13 @@ def norm(x, ord=None, axis=None, keepdims=False):
n : float or ndarray
Norm of the matrix or vector(s).
+ See Also
+ --------
+ scipy.linalg.norm : Similar function in SciPy.
+
Notes
-----
- For values of ``ord <= 0``, the result is, strictly speaking, not a
+ For values of ``ord < 1``, the result is, strictly speaking, not a
mathematical 'norm', but it may still be useful for various numerical
purposes.
@@ -2383,6 +2434,9 @@ def norm(x, ord=None, axis=None, keepdims=False):
The nuclear norm is the sum of the singular values.
+ Both the Frobenius and nuclear norm orders are only defined for
+ matrices and raise a ValueError when ``x.ndim != 2``.
+
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
@@ -2505,11 +2559,11 @@ def norm(x, ord=None, axis=None, keepdims=False):
# special case for speedup
s = (x.conj() * x).real
return sqrt(add.reduce(s, axis=axis, keepdims=keepdims))
+ # None of the str-type keywords for ord ('fro', 'nuc')
+ # are valid for vectors
+ elif isinstance(ord, str):
+ raise ValueError(f"Invalid norm order '{ord}' for vectors")
else:
- try:
- ord + 1
- except TypeError:
- raise ValueError("Invalid norm order for vectors.")
absx = abs(x)
absx **= ord
ret = add.reduce(absx, axis=axis, keepdims=keepdims)
@@ -2657,7 +2711,7 @@ def multi_dot(arrays):
arrays[0] = atleast_2d(arrays[0])
if arrays[-1].ndim == 1:
arrays[-1] = atleast_2d(arrays[-1]).T
- _assertRank2(*arrays)
+ _assert_2d(*arrays)
# _multi_dot_three is much faster than _multi_dot_matrix_chain_order
if n == 3:
diff --git a/numpy/linalg/setup.py b/numpy/linalg/setup.py
index 66c07c9e1..acfab0a68 100644
--- a/numpy/linalg/setup.py
+++ b/numpy/linalg/setup.py
@@ -1,11 +1,9 @@
-from __future__ import division, print_function
-
import os
import sys
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
- from numpy.distutils.system_info import get_info
+ from numpy.distutils.system_info import get_info, system_info
config = Configuration('linalg', parent_package, top_path)
config.add_data_dir('tests')
@@ -26,10 +24,33 @@ def configuration(parent_package='', top_path=None):
]
all_sources = config.paths(lapack_lite_src)
- lapack_info = get_info('lapack_opt', 0) # and {}
+ if os.environ.get('NPY_USE_BLAS_ILP64', "0") != "0":
+ lapack_info = get_info('lapack_ilp64_opt', 2)
+ else:
+ lapack_info = get_info('lapack_opt', 0) # and {}
+
+ use_lapack_lite = not lapack_info
+
+ if use_lapack_lite:
+ # This makes numpy.distutils write the fact that lapack_lite
+ # is being used to numpy.__config__
+ class numpy_linalg_lapack_lite(system_info):
+ def calc_info(self):
+ info = {'language': 'c'}
+ if sys.maxsize > 2**32:
+ # Build lapack-lite in 64-bit integer mode.
+ # The suffix is arbitrary (lapack_lite symbols follow it),
+ # but use the "64_" convention here.
+ info['define_macros'] = [
+ ('HAVE_BLAS_ILP64', None),
+ ('BLAS_SYMBOL_SUFFIX', '64_')
+ ]
+ self.set_info(**info)
+
+ lapack_info = numpy_linalg_lapack_lite().get_info(2)
def get_lapack_lite_sources(ext, build_dir):
- if not lapack_info:
+ if use_lapack_lite:
print("### Warning: Using unoptimized lapack ###")
return all_sources
else:
diff --git a/numpy/linalg/tests/test_build.py b/numpy/linalg/tests/test_build.py
index 921390da3..cbf3089bc 100644
--- a/numpy/linalg/tests/test_build.py
+++ b/numpy/linalg/tests/test_build.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
from subprocess import PIPE, Popen
import sys
import re
@@ -9,7 +7,7 @@ from numpy.linalg import lapack_lite
from numpy.testing import assert_
-class FindDependenciesLdd(object):
+class FindDependenciesLdd:
def __init__(self):
self.cmd = ['ldd']
@@ -41,7 +39,7 @@ class FindDependenciesLdd(object):
return founds
-class TestF77Mismatch(object):
+class TestF77Mismatch:
@pytest.mark.skipif(not(sys.platform[:5] == 'linux'),
reason="no fortran compiler on non-Linux platform")
diff --git a/numpy/linalg/tests/test_deprecations.py b/numpy/linalg/tests/test_deprecations.py
index e12755e0d..cd4c10832 100644
--- a/numpy/linalg/tests/test_deprecations.py
+++ b/numpy/linalg/tests/test_deprecations.py
@@ -1,8 +1,6 @@
"""Test deprecation and future warnings.
"""
-from __future__ import division, absolute_import, print_function
-
import numpy as np
from numpy.testing import assert_warns
diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py
index 173e81e9c..dae4ef61e 100644
--- a/numpy/linalg/tests/test_linalg.py
+++ b/numpy/linalg/tests/test_linalg.py
@@ -1,8 +1,6 @@
""" Test functions for linalg module
"""
-from __future__ import division, absolute_import, print_function
-
import os
import sys
import itertools
@@ -20,8 +18,9 @@ from numpy.linalg.linalg import _multi_dot_matrix_chain_order
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_array_equal,
assert_almost_equal, assert_allclose, suppress_warnings,
- assert_raises_regex,
+ assert_raises_regex, HAS_LAPACK64,
)
+from numpy.testing._private.utils import requires_memory
def consistent_subclass(out, in_):
@@ -67,7 +66,7 @@ all_tags = {
}
-class LinalgCase(object):
+class LinalgCase:
def __init__(self, name, a, b, tags=set()):
"""
A bundle of arguments to be passed to a test case, with an identifying
@@ -332,7 +331,7 @@ CASES += _make_strided_cases()
#
# Test different routines against the above cases
#
-class LinalgTestCase(object):
+class LinalgTestCase:
TEST_CASES = CASES
def check_cases(self, require=set(), exclude=set()):
@@ -633,7 +632,7 @@ class TestEig(EigCases):
assert_(isinstance(a, np.ndarray))
-class SVDBaseTests(object):
+class SVDBaseTests:
hermitian = False
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
@@ -681,6 +680,14 @@ class SVDHermitianCases(HermitianTestCase, HermitianGeneralizedTestCase):
assert_allclose(a, dot_generalized(np.asarray(u) * np.asarray(s)[..., None, :],
np.asarray(vt)),
rtol=get_rtol(u.dtype))
+ def hermitian(mat):
+ axes = list(range(mat.ndim))
+ axes[-1], axes[-2] = axes[-2], axes[-1]
+ return np.conj(np.transpose(mat, axes=axes))
+
+ assert_almost_equal(np.matmul(u, hermitian(u)), np.broadcast_to(np.eye(u.shape[-1]), u.shape))
+ assert_almost_equal(np.matmul(vt, hermitian(vt)), np.broadcast_to(np.eye(vt.shape[-1]), vt.shape))
+ assert_equal(np.sort(s)[..., ::-1], s)
assert_(consistent_subclass(u, a))
assert_(consistent_subclass(vt, a))
@@ -975,7 +982,7 @@ class TestLstsq(LstsqCases):
@pytest.mark.parametrize('dt', [np.dtype(c) for c in '?bBhHiIqQefdgFDGO'])
-class TestMatrixPower(object):
+class TestMatrixPower:
rshft_0 = np.eye(4)
rshft_1 = rshft_0[[3, 0, 1, 2]]
@@ -1075,7 +1082,7 @@ class TestEigvalshCases(HermitianTestCase, HermitianGeneralizedTestCase):
assert_allclose(ev2, evalues, rtol=get_rtol(ev.dtype))
-class TestEigvalsh(object):
+class TestEigvalsh:
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
@@ -1151,7 +1158,7 @@ class TestEighCases(HermitianTestCase, HermitianGeneralizedTestCase):
rtol=get_rtol(ev.dtype), err_msg=repr(a))
-class TestEigh(object):
+class TestEigh:
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
@@ -1210,7 +1217,7 @@ class TestEigh(object):
assert_(isinstance(a, np.ndarray))
-class _TestNormBase(object):
+class _TestNormBase:
dt = None
dec = None
@@ -1473,11 +1480,12 @@ class _TestNorm2D(_TestNormBase):
# Using `axis=<integer>` or passing in a 1-D array implies vector
# norms are being computed, so also using `ord='fro'`
- # or `ord='nuc'` raises a ValueError.
+ # or `ord='nuc'` or any other string raises a ValueError.
assert_raises(ValueError, norm, A, 'fro', 0)
assert_raises(ValueError, norm, A, 'nuc', 0)
assert_raises(ValueError, norm, [3, 4], 'fro', None)
assert_raises(ValueError, norm, [3, 4], 'nuc', None)
+ assert_raises(ValueError, norm, [3, 4], 'test', None)
# Similarly, norm should raise an exception when ord is any finite
# number other than 1, 2, -1 or -2 when computing matrix norms.
@@ -1496,7 +1504,7 @@ class _TestNorm(_TestNorm2D, _TestNormGeneral):
pass
-class TestNorm_NonSystematic(object):
+class TestNorm_NonSystematic:
def test_longdouble_norm(self):
# Non-regression test: p-norm of longdouble would previously raise
@@ -1551,7 +1559,7 @@ class TestNormInt64(_TestNorm, _TestNormInt64Base):
pass
-class TestMatrixRank(object):
+class TestMatrixRank:
def test_matrix_rank(self):
# Full rank matrix
@@ -1600,7 +1608,7 @@ def test_reduced_rank():
assert_equal(matrix_rank(X), 8)
-class TestQR(object):
+class TestQR:
# Define the array class here, so run this on matrices elsewhere.
array = np.array
@@ -1700,7 +1708,7 @@ class TestQR(object):
self.check_qr(m2.T)
-class TestCholesky(object):
+class TestCholesky:
# TODO: are there no other tests for cholesky?
def test_basic_property(self):
@@ -1828,6 +1836,7 @@ def test_xerbla_override():
pytest.skip('Numpy xerbla not linked in.')
+@pytest.mark.slow
def test_sdot_bug_8577():
# Regression test that loading certain other libraries does not
# result to wrong results in float32 linear algebra.
@@ -1862,7 +1871,7 @@ def test_sdot_bug_8577():
subprocess.check_call([sys.executable, "-c", code])
-class TestMultiDot(object):
+class TestMultiDot:
def test_basic_function_with_three_arguments(self):
# multi_dot with three arguments uses a fast hand coded algorithm to
@@ -1956,7 +1965,7 @@ class TestMultiDot(object):
assert_raises(ValueError, multi_dot, [np.random.random((3, 3))])
-class TestTensorinv(object):
+class TestTensorinv:
@pytest.mark.parametrize("arr, ind", [
(np.ones((4, 6, 8, 2)), 2),
@@ -2002,3 +2011,44 @@ def test_unsupported_commontype():
arr = np.array([[1, -2], [2, 5]], dtype='float16')
with assert_raises_regex(TypeError, "unsupported in linalg"):
linalg.cholesky(arr)
+
+
+@pytest.mark.slow
+@pytest.mark.xfail(not HAS_LAPACK64, run=False,
+ reason="Numpy not compiled with 64-bit BLAS/LAPACK")
+@requires_memory(free_bytes=16e9)
+def test_blas64_dot():
+ n = 2**32
+ a = np.zeros([1, n], dtype=np.float32)
+ b = np.ones([1, 1], dtype=np.float32)
+ a[0,-1] = 1
+ c = np.dot(b, a)
+ assert_equal(c[0,-1], 1)
+
+
+@pytest.mark.xfail(not HAS_LAPACK64,
+ reason="Numpy not compiled with 64-bit BLAS/LAPACK")
+def test_blas64_geqrf_lwork_smoketest():
+ # Smoke test LAPACK geqrf lwork call with 64-bit integers
+ dtype = np.float64
+ lapack_routine = np.linalg.lapack_lite.dgeqrf
+
+ m = 2**32 + 1
+ n = 2**32 + 1
+ lda = m
+
+ # Dummy arrays, not referenced by the lapack routine, so don't
+ # need to be of the right size
+ a = np.zeros([1, 1], dtype=dtype)
+ work = np.zeros([1], dtype=dtype)
+ tau = np.zeros([1], dtype=dtype)
+
+ # Size query
+ results = lapack_routine(m, n, a, lda, tau, work, -1, 0)
+ assert_equal(results['info'], 0)
+ assert_equal(results['m'], m)
+ assert_equal(results['n'], m)
+
+    # Should result in an integer of a reasonable size
+ lwork = int(work.item())
+ assert_(2**32 < lwork < 2**42)
diff --git a/numpy/linalg/tests/test_regression.py b/numpy/linalg/tests/test_regression.py
index bd3a45872..7ed932bc9 100644
--- a/numpy/linalg/tests/test_regression.py
+++ b/numpy/linalg/tests/test_regression.py
@@ -1,7 +1,5 @@
""" Test functions for linalg module
"""
-from __future__ import division, absolute_import, print_function
-
import warnings
import numpy as np
@@ -12,7 +10,7 @@ from numpy.testing import (
)
-class TestRegression(object):
+class TestRegression:
def test_eig_build(self):
# Ticket #652
@@ -59,8 +57,8 @@ class TestRegression(object):
assert_array_almost_equal(b, np.zeros((2, 2)))
def test_norm_vector_badarg(self):
- # Regression for #786: Froebenius norm for vectors raises
- # TypeError.
+ # Regression for #786: Frobenius norm for vectors raises
+ # ValueError.
assert_raises(ValueError, linalg.norm, array([1., 2., 3.]), 'fro')
def test_lapack_endian(self):
diff --git a/numpy/linalg/umath_linalg.c.src b/numpy/linalg/umath_linalg.c.src
index ee103c327..59647c67d 100644
--- a/numpy/linalg/umath_linalg.c.src
+++ b/numpy/linalg/umath_linalg.c.src
@@ -15,6 +15,8 @@
#include "npy_config.h"
+#include "npy_cblas.h"
+
#include <stddef.h>
#include <stdio.h>
#include <assert.h>
@@ -62,301 +64,304 @@ dbg_stack_trace()
*****************************************************************************
*/
-#ifdef NO_APPEND_FORTRAN
-# define FNAME(x) x
-#else
-# define FNAME(x) x##_
-#endif
+#define FNAME(x) BLAS_FUNC(x)
+
+typedef CBLAS_INT fortran_int;
typedef struct { float r, i; } f2c_complex;
typedef struct { double r, i; } f2c_doublecomplex;
/* typedef long int (*L_fp)(); */
-extern int
-FNAME(sgeev)(char *jobvl, char *jobvr, int *n,
- float a[], int *lda, float wr[], float wi[],
- float vl[], int *ldvl, float vr[], int *ldvr,
- float work[], int lwork[],
- int *info);
-extern int
-FNAME(dgeev)(char *jobvl, char *jobvr, int *n,
- double a[], int *lda, double wr[], double wi[],
- double vl[], int *ldvl, double vr[], int *ldvr,
- double work[], int lwork[],
- int *info);
-extern int
-FNAME(cgeev)(char *jobvl, char *jobvr, int *n,
- f2c_doublecomplex a[], int *lda,
+typedef float fortran_real;
+typedef double fortran_doublereal;
+typedef f2c_complex fortran_complex;
+typedef f2c_doublecomplex fortran_doublecomplex;
+
+extern fortran_int
+FNAME(sgeev)(char *jobvl, char *jobvr, fortran_int *n,
+ float a[], fortran_int *lda, float wr[], float wi[],
+ float vl[], fortran_int *ldvl, float vr[], fortran_int *ldvr,
+ float work[], fortran_int lwork[],
+ fortran_int *info);
+extern fortran_int
+FNAME(dgeev)(char *jobvl, char *jobvr, fortran_int *n,
+ double a[], fortran_int *lda, double wr[], double wi[],
+ double vl[], fortran_int *ldvl, double vr[], fortran_int *ldvr,
+ double work[], fortran_int lwork[],
+ fortran_int *info);
+extern fortran_int
+FNAME(cgeev)(char *jobvl, char *jobvr, fortran_int *n,
+ f2c_doublecomplex a[], fortran_int *lda,
f2c_doublecomplex w[],
- f2c_doublecomplex vl[], int *ldvl,
- f2c_doublecomplex vr[], int *ldvr,
- f2c_doublecomplex work[], int *lwork,
+ f2c_doublecomplex vl[], fortran_int *ldvl,
+ f2c_doublecomplex vr[], fortran_int *ldvr,
+ f2c_doublecomplex work[], fortran_int *lwork,
double rwork[],
- int *info);
-extern int
-FNAME(zgeev)(char *jobvl, char *jobvr, int *n,
- f2c_doublecomplex a[], int *lda,
+ fortran_int *info);
+extern fortran_int
+FNAME(zgeev)(char *jobvl, char *jobvr, fortran_int *n,
+ f2c_doublecomplex a[], fortran_int *lda,
f2c_doublecomplex w[],
- f2c_doublecomplex vl[], int *ldvl,
- f2c_doublecomplex vr[], int *ldvr,
- f2c_doublecomplex work[], int *lwork,
+ f2c_doublecomplex vl[], fortran_int *ldvl,
+ f2c_doublecomplex vr[], fortran_int *ldvr,
+ f2c_doublecomplex work[], fortran_int *lwork,
double rwork[],
- int *info);
-
-extern int
-FNAME(ssyevd)(char *jobz, char *uplo, int *n,
- float a[], int *lda, float w[], float work[],
- int *lwork, int iwork[], int *liwork,
- int *info);
-extern int
-FNAME(dsyevd)(char *jobz, char *uplo, int *n,
- double a[], int *lda, double w[], double work[],
- int *lwork, int iwork[], int *liwork,
- int *info);
-extern int
-FNAME(cheevd)(char *jobz, char *uplo, int *n,
- f2c_complex a[], int *lda,
+ fortran_int *info);
+
+extern fortran_int
+FNAME(ssyevd)(char *jobz, char *uplo, fortran_int *n,
+ float a[], fortran_int *lda, float w[], float work[],
+ fortran_int *lwork, fortran_int iwork[], fortran_int *liwork,
+ fortran_int *info);
+extern fortran_int
+FNAME(dsyevd)(char *jobz, char *uplo, fortran_int *n,
+ double a[], fortran_int *lda, double w[], double work[],
+ fortran_int *lwork, fortran_int iwork[], fortran_int *liwork,
+ fortran_int *info);
+extern fortran_int
+FNAME(cheevd)(char *jobz, char *uplo, fortran_int *n,
+ f2c_complex a[], fortran_int *lda,
float w[], f2c_complex work[],
- int *lwork, float rwork[], int *lrwork, int iwork[],
- int *liwork,
- int *info);
-extern int
-FNAME(zheevd)(char *jobz, char *uplo, int *n,
- f2c_doublecomplex a[], int *lda,
+ fortran_int *lwork, float rwork[], fortran_int *lrwork, fortran_int iwork[],
+ fortran_int *liwork,
+ fortran_int *info);
+extern fortran_int
+FNAME(zheevd)(char *jobz, char *uplo, fortran_int *n,
+ f2c_doublecomplex a[], fortran_int *lda,
double w[], f2c_doublecomplex work[],
- int *lwork, double rwork[], int *lrwork, int iwork[],
- int *liwork,
- int *info);
-
-extern int
-FNAME(sgelsd)(int *m, int *n, int *nrhs,
- float a[], int *lda, float b[], int *ldb,
- float s[], float *rcond, int *rank,
- float work[], int *lwork, int iwork[],
- int *info);
-extern int
-FNAME(dgelsd)(int *m, int *n, int *nrhs,
- double a[], int *lda, double b[], int *ldb,
- double s[], double *rcond, int *rank,
- double work[], int *lwork, int iwork[],
- int *info);
-extern int
-FNAME(cgelsd)(int *m, int *n, int *nrhs,
- f2c_complex a[], int *lda,
- f2c_complex b[], int *ldb,
- float s[], float *rcond, int *rank,
- f2c_complex work[], int *lwork,
- float rwork[], int iwork[],
- int *info);
-extern int
-FNAME(zgelsd)(int *m, int *n, int *nrhs,
- f2c_doublecomplex a[], int *lda,
- f2c_doublecomplex b[], int *ldb,
- double s[], double *rcond, int *rank,
- f2c_doublecomplex work[], int *lwork,
- double rwork[], int iwork[],
- int *info);
-
-extern int
-FNAME(sgesv)(int *n, int *nrhs,
- float a[], int *lda,
- int ipiv[],
- float b[], int *ldb,
- int *info);
-extern int
-FNAME(dgesv)(int *n, int *nrhs,
- double a[], int *lda,
- int ipiv[],
- double b[], int *ldb,
- int *info);
-extern int
-FNAME(cgesv)(int *n, int *nrhs,
- f2c_complex a[], int *lda,
- int ipiv[],
- f2c_complex b[], int *ldb,
- int *info);
-extern int
-FNAME(zgesv)(int *n, int *nrhs,
- f2c_doublecomplex a[], int *lda,
- int ipiv[],
- f2c_doublecomplex b[], int *ldb,
- int *info);
-
-extern int
-FNAME(sgetrf)(int *m, int *n,
- float a[], int *lda,
- int ipiv[],
- int *info);
-extern int
-FNAME(dgetrf)(int *m, int *n,
- double a[], int *lda,
- int ipiv[],
- int *info);
-extern int
-FNAME(cgetrf)(int *m, int *n,
- f2c_complex a[], int *lda,
- int ipiv[],
- int *info);
-extern int
-FNAME(zgetrf)(int *m, int *n,
- f2c_doublecomplex a[], int *lda,
- int ipiv[],
- int *info);
-
-extern int
-FNAME(spotrf)(char *uplo, int *n,
- float a[], int *lda,
- int *info);
-extern int
-FNAME(dpotrf)(char *uplo, int *n,
- double a[], int *lda,
- int *info);
-extern int
-FNAME(cpotrf)(char *uplo, int *n,
- f2c_complex a[], int *lda,
- int *info);
-extern int
-FNAME(zpotrf)(char *uplo, int *n,
- f2c_doublecomplex a[], int *lda,
- int *info);
-
-extern int
-FNAME(sgesdd)(char *jobz, int *m, int *n,
- float a[], int *lda, float s[], float u[],
- int *ldu, float vt[], int *ldvt, float work[],
- int *lwork, int iwork[], int *info);
-extern int
-FNAME(dgesdd)(char *jobz, int *m, int *n,
- double a[], int *lda, double s[], double u[],
- int *ldu, double vt[], int *ldvt, double work[],
- int *lwork, int iwork[], int *info);
-extern int
-FNAME(cgesdd)(char *jobz, int *m, int *n,
- f2c_complex a[], int *lda,
- float s[], f2c_complex u[], int *ldu,
- f2c_complex vt[], int *ldvt,
- f2c_complex work[], int *lwork,
- float rwork[], int iwork[], int *info);
-extern int
-FNAME(zgesdd)(char *jobz, int *m, int *n,
- f2c_doublecomplex a[], int *lda,
- double s[], f2c_doublecomplex u[], int *ldu,
- f2c_doublecomplex vt[], int *ldvt,
- f2c_doublecomplex work[], int *lwork,
- double rwork[], int iwork[], int *info);
-
-extern int
-FNAME(spotrs)(char *uplo, int *n, int *nrhs,
- float a[], int *lda,
- float b[], int *ldb,
- int *info);
-extern int
-FNAME(dpotrs)(char *uplo, int *n, int *nrhs,
- double a[], int *lda,
- double b[], int *ldb,
- int *info);
-extern int
-FNAME(cpotrs)(char *uplo, int *n, int *nrhs,
- f2c_complex a[], int *lda,
- f2c_complex b[], int *ldb,
- int *info);
-extern int
-FNAME(zpotrs)(char *uplo, int *n, int *nrhs,
- f2c_doublecomplex a[], int *lda,
- f2c_doublecomplex b[], int *ldb,
- int *info);
-
-extern int
-FNAME(spotri)(char *uplo, int *n,
- float a[], int *lda,
- int *info);
-extern int
-FNAME(dpotri)(char *uplo, int *n,
- double a[], int *lda,
- int *info);
-extern int
-FNAME(cpotri)(char *uplo, int *n,
- f2c_complex a[], int *lda,
- int *info);
-extern int
-FNAME(zpotri)(char *uplo, int *n,
- f2c_doublecomplex a[], int *lda,
- int *info);
-
-extern int
-FNAME(scopy)(int *n,
- float *sx, int *incx,
- float *sy, int *incy);
-extern int
-FNAME(dcopy)(int *n,
- double *sx, int *incx,
- double *sy, int *incy);
-extern int
-FNAME(ccopy)(int *n,
- f2c_complex *sx, int *incx,
- f2c_complex *sy, int *incy);
-extern int
-FNAME(zcopy)(int *n,
- f2c_doublecomplex *sx, int *incx,
- f2c_doublecomplex *sy, int *incy);
+ fortran_int *lwork, double rwork[], fortran_int *lrwork, fortran_int iwork[],
+ fortran_int *liwork,
+ fortran_int *info);
+
+extern fortran_int
+FNAME(sgelsd)(fortran_int *m, fortran_int *n, fortran_int *nrhs,
+ float a[], fortran_int *lda, float b[], fortran_int *ldb,
+ float s[], float *rcond, fortran_int *rank,
+ float work[], fortran_int *lwork, fortran_int iwork[],
+ fortran_int *info);
+extern fortran_int
+FNAME(dgelsd)(fortran_int *m, fortran_int *n, fortran_int *nrhs,
+ double a[], fortran_int *lda, double b[], fortran_int *ldb,
+ double s[], double *rcond, fortran_int *rank,
+ double work[], fortran_int *lwork, fortran_int iwork[],
+ fortran_int *info);
+extern fortran_int
+FNAME(cgelsd)(fortran_int *m, fortran_int *n, fortran_int *nrhs,
+ f2c_complex a[], fortran_int *lda,
+ f2c_complex b[], fortran_int *ldb,
+ float s[], float *rcond, fortran_int *rank,
+ f2c_complex work[], fortran_int *lwork,
+ float rwork[], fortran_int iwork[],
+ fortran_int *info);
+extern fortran_int
+FNAME(zgelsd)(fortran_int *m, fortran_int *n, fortran_int *nrhs,
+ f2c_doublecomplex a[], fortran_int *lda,
+ f2c_doublecomplex b[], fortran_int *ldb,
+ double s[], double *rcond, fortran_int *rank,
+ f2c_doublecomplex work[], fortran_int *lwork,
+ double rwork[], fortran_int iwork[],
+ fortran_int *info);
+
+extern fortran_int
+FNAME(sgesv)(fortran_int *n, fortran_int *nrhs,
+ float a[], fortran_int *lda,
+ fortran_int ipiv[],
+ float b[], fortran_int *ldb,
+ fortran_int *info);
+extern fortran_int
+FNAME(dgesv)(fortran_int *n, fortran_int *nrhs,
+ double a[], fortran_int *lda,
+ fortran_int ipiv[],
+ double b[], fortran_int *ldb,
+ fortran_int *info);
+extern fortran_int
+FNAME(cgesv)(fortran_int *n, fortran_int *nrhs,
+ f2c_complex a[], fortran_int *lda,
+ fortran_int ipiv[],
+ f2c_complex b[], fortran_int *ldb,
+ fortran_int *info);
+extern fortran_int
+FNAME(zgesv)(fortran_int *n, fortran_int *nrhs,
+ f2c_doublecomplex a[], fortran_int *lda,
+ fortran_int ipiv[],
+ f2c_doublecomplex b[], fortran_int *ldb,
+ fortran_int *info);
+
+extern fortran_int
+FNAME(sgetrf)(fortran_int *m, fortran_int *n,
+ float a[], fortran_int *lda,
+ fortran_int ipiv[],
+ fortran_int *info);
+extern fortran_int
+FNAME(dgetrf)(fortran_int *m, fortran_int *n,
+ double a[], fortran_int *lda,
+ fortran_int ipiv[],
+ fortran_int *info);
+extern fortran_int
+FNAME(cgetrf)(fortran_int *m, fortran_int *n,
+ f2c_complex a[], fortran_int *lda,
+ fortran_int ipiv[],
+ fortran_int *info);
+extern fortran_int
+FNAME(zgetrf)(fortran_int *m, fortran_int *n,
+ f2c_doublecomplex a[], fortran_int *lda,
+ fortran_int ipiv[],
+ fortran_int *info);
+
+extern fortran_int
+FNAME(spotrf)(char *uplo, fortran_int *n,
+ float a[], fortran_int *lda,
+ fortran_int *info);
+extern fortran_int
+FNAME(dpotrf)(char *uplo, fortran_int *n,
+ double a[], fortran_int *lda,
+ fortran_int *info);
+extern fortran_int
+FNAME(cpotrf)(char *uplo, fortran_int *n,
+ f2c_complex a[], fortran_int *lda,
+ fortran_int *info);
+extern fortran_int
+FNAME(zpotrf)(char *uplo, fortran_int *n,
+ f2c_doublecomplex a[], fortran_int *lda,
+ fortran_int *info);
+
+extern fortran_int
+FNAME(sgesdd)(char *jobz, fortran_int *m, fortran_int *n,
+ float a[], fortran_int *lda, float s[], float u[],
+ fortran_int *ldu, float vt[], fortran_int *ldvt, float work[],
+ fortran_int *lwork, fortran_int iwork[], fortran_int *info);
+extern fortran_int
+FNAME(dgesdd)(char *jobz, fortran_int *m, fortran_int *n,
+ double a[], fortran_int *lda, double s[], double u[],
+ fortran_int *ldu, double vt[], fortran_int *ldvt, double work[],
+ fortran_int *lwork, fortran_int iwork[], fortran_int *info);
+extern fortran_int
+FNAME(cgesdd)(char *jobz, fortran_int *m, fortran_int *n,
+ f2c_complex a[], fortran_int *lda,
+ float s[], f2c_complex u[], fortran_int *ldu,
+ f2c_complex vt[], fortran_int *ldvt,
+ f2c_complex work[], fortran_int *lwork,
+ float rwork[], fortran_int iwork[], fortran_int *info);
+extern fortran_int
+FNAME(zgesdd)(char *jobz, fortran_int *m, fortran_int *n,
+ f2c_doublecomplex a[], fortran_int *lda,
+ double s[], f2c_doublecomplex u[], fortran_int *ldu,
+ f2c_doublecomplex vt[], fortran_int *ldvt,
+ f2c_doublecomplex work[], fortran_int *lwork,
+ double rwork[], fortran_int iwork[], fortran_int *info);
+
+extern fortran_int
+FNAME(spotrs)(char *uplo, fortran_int *n, fortran_int *nrhs,
+ float a[], fortran_int *lda,
+ float b[], fortran_int *ldb,
+ fortran_int *info);
+extern fortran_int
+FNAME(dpotrs)(char *uplo, fortran_int *n, fortran_int *nrhs,
+ double a[], fortran_int *lda,
+ double b[], fortran_int *ldb,
+ fortran_int *info);
+extern fortran_int
+FNAME(cpotrs)(char *uplo, fortran_int *n, fortran_int *nrhs,
+ f2c_complex a[], fortran_int *lda,
+ f2c_complex b[], fortran_int *ldb,
+ fortran_int *info);
+extern fortran_int
+FNAME(zpotrs)(char *uplo, fortran_int *n, fortran_int *nrhs,
+ f2c_doublecomplex a[], fortran_int *lda,
+ f2c_doublecomplex b[], fortran_int *ldb,
+ fortran_int *info);
+
+extern fortran_int
+FNAME(spotri)(char *uplo, fortran_int *n,
+ float a[], fortran_int *lda,
+ fortran_int *info);
+extern fortran_int
+FNAME(dpotri)(char *uplo, fortran_int *n,
+ double a[], fortran_int *lda,
+ fortran_int *info);
+extern fortran_int
+FNAME(cpotri)(char *uplo, fortran_int *n,
+ f2c_complex a[], fortran_int *lda,
+ fortran_int *info);
+extern fortran_int
+FNAME(zpotri)(char *uplo, fortran_int *n,
+ f2c_doublecomplex a[], fortran_int *lda,
+ fortran_int *info);
+
+extern fortran_int
+FNAME(scopy)(fortran_int *n,
+ float *sx, fortran_int *incx,
+ float *sy, fortran_int *incy);
+extern fortran_int
+FNAME(dcopy)(fortran_int *n,
+ double *sx, fortran_int *incx,
+ double *sy, fortran_int *incy);
+extern fortran_int
+FNAME(ccopy)(fortran_int *n,
+ f2c_complex *sx, fortran_int *incx,
+ f2c_complex *sy, fortran_int *incy);
+extern fortran_int
+FNAME(zcopy)(fortran_int *n,
+ f2c_doublecomplex *sx, fortran_int *incx,
+ f2c_doublecomplex *sy, fortran_int *incy);
extern float
-FNAME(sdot)(int *n,
- float *sx, int *incx,
- float *sy, int *incy);
+FNAME(sdot)(fortran_int *n,
+ float *sx, fortran_int *incx,
+ float *sy, fortran_int *incy);
extern double
-FNAME(ddot)(int *n,
- double *sx, int *incx,
- double *sy, int *incy);
+FNAME(ddot)(fortran_int *n,
+ double *sx, fortran_int *incx,
+ double *sy, fortran_int *incy);
extern void
-FNAME(cdotu)(f2c_complex *ret, int *n,
- f2c_complex *sx, int *incx,
- f2c_complex *sy, int *incy);
+FNAME(cdotu)(f2c_complex *ret, fortran_int *n,
+ f2c_complex *sx, fortran_int *incx,
+ f2c_complex *sy, fortran_int *incy);
extern void
-FNAME(zdotu)(f2c_doublecomplex *ret, int *n,
- f2c_doublecomplex *sx, int *incx,
- f2c_doublecomplex *sy, int *incy);
+FNAME(zdotu)(f2c_doublecomplex *ret, fortran_int *n,
+ f2c_doublecomplex *sx, fortran_int *incx,
+ f2c_doublecomplex *sy, fortran_int *incy);
extern void
-FNAME(cdotc)(f2c_complex *ret, int *n,
- f2c_complex *sx, int *incx,
- f2c_complex *sy, int *incy);
+FNAME(cdotc)(f2c_complex *ret, fortran_int *n,
+ f2c_complex *sx, fortran_int *incx,
+ f2c_complex *sy, fortran_int *incy);
extern void
-FNAME(zdotc)(f2c_doublecomplex *ret, int *n,
- f2c_doublecomplex *sx, int *incx,
- f2c_doublecomplex *sy, int *incy);
+FNAME(zdotc)(f2c_doublecomplex *ret, fortran_int *n,
+ f2c_doublecomplex *sx, fortran_int *incx,
+ f2c_doublecomplex *sy, fortran_int *incy);
-extern int
+extern fortran_int
FNAME(sgemm)(char *transa, char *transb,
- int *m, int *n, int *k,
+ fortran_int *m, fortran_int *n, fortran_int *k,
float *alpha,
- float *a, int *lda,
- float *b, int *ldb,
+ float *a, fortran_int *lda,
+ float *b, fortran_int *ldb,
float *beta,
- float *c, int *ldc);
-extern int
+ float *c, fortran_int *ldc);
+extern fortran_int
FNAME(dgemm)(char *transa, char *transb,
- int *m, int *n, int *k,
+ fortran_int *m, fortran_int *n, fortran_int *k,
double *alpha,
- double *a, int *lda,
- double *b, int *ldb,
+ double *a, fortran_int *lda,
+ double *b, fortran_int *ldb,
double *beta,
- double *c, int *ldc);
-extern int
+ double *c, fortran_int *ldc);
+extern fortran_int
FNAME(cgemm)(char *transa, char *transb,
- int *m, int *n, int *k,
+ fortran_int *m, fortran_int *n, fortran_int *k,
f2c_complex *alpha,
- f2c_complex *a, int *lda,
- f2c_complex *b, int *ldb,
+ f2c_complex *a, fortran_int *lda,
+ f2c_complex *b, fortran_int *ldb,
f2c_complex *beta,
- f2c_complex *c, int *ldc);
-extern int
+ f2c_complex *c, fortran_int *ldc);
+extern fortran_int
FNAME(zgemm)(char *transa, char *transb,
- int *m, int *n, int *k,
+ fortran_int *m, fortran_int *n, fortran_int *k,
f2c_doublecomplex *alpha,
- f2c_doublecomplex *a, int *lda,
- f2c_doublecomplex *b, int *ldb,
+ f2c_doublecomplex *a, fortran_int *lda,
+ f2c_doublecomplex *b, fortran_int *ldb,
f2c_doublecomplex *beta,
- f2c_doublecomplex *c, int *ldc);
+ f2c_doublecomplex *c, fortran_int *ldc);
#define LAPACK_T(FUNC) \
@@ -369,12 +374,6 @@ FNAME(zgemm)(char *transa, char *transb,
#define LAPACK(FUNC) \
FNAME(FUNC)
-typedef int fortran_int;
-typedef float fortran_real;
-typedef double fortran_doublereal;
-typedef f2c_complex fortran_complex;
-typedef f2c_doublecomplex fortran_doublecomplex;
-
/*
*****************************************************************************
@@ -1085,8 +1084,8 @@ static NPY_INLINE void
static void
@TYPE@_slogdet(char **args,
- npy_intp *dimensions,
- npy_intp *steps,
+ npy_intp const *dimensions,
+ npy_intp const *steps,
void *NPY_UNUSED(func))
{
fortran_int m;
@@ -1128,8 +1127,8 @@ static void
static void
@TYPE@_det(char **args,
- npy_intp *dimensions,
- npy_intp *steps,
+ npy_intp const *dimensions,
+ npy_intp const *steps,
void *NPY_UNUSED(func))
{
fortran_int m;
@@ -1427,8 +1426,8 @@ static NPY_INLINE void
@TYPE@_eigh_wrapper(char JOBZ,
char UPLO,
char**args,
- npy_intp* dimensions,
- npy_intp* steps)
+ npy_intp const *dimensions,
+ npy_intp const *steps)
{
ptrdiff_t outer_steps[3];
size_t iter;
@@ -1502,8 +1501,8 @@ static NPY_INLINE void
*/
static void
@TYPE@_eighlo(char **args,
- npy_intp *dimensions,
- npy_intp *steps,
+ npy_intp const *dimensions,
+ npy_intp const *steps,
void *NPY_UNUSED(func))
{
@TYPE@_eigh_wrapper('V', 'L', args, dimensions, steps);
@@ -1511,8 +1510,8 @@ static void
static void
@TYPE@_eighup(char **args,
- npy_intp *dimensions,
- npy_intp *steps,
+ npy_intp const *dimensions,
+ npy_intp const *steps,
void* NPY_UNUSED(func))
{
@TYPE@_eigh_wrapper('V', 'U', args, dimensions, steps);
@@ -1520,8 +1519,8 @@ static void
static void
@TYPE@_eigvalshlo(char **args,
- npy_intp *dimensions,
- npy_intp *steps,
+ npy_intp const *dimensions,
+ npy_intp const *steps,
void* NPY_UNUSED(func))
{
@TYPE@_eigh_wrapper('N', 'L', args, dimensions, steps);
@@ -1529,8 +1528,8 @@ static void
static void
@TYPE@_eigvalshup(char **args,
- npy_intp *dimensions,
- npy_intp *steps,
+ npy_intp const *dimensions,
+ npy_intp const *steps,
void* NPY_UNUSED(func))
{
@TYPE@_eigh_wrapper('N', 'U', args, dimensions, steps);
@@ -1619,7 +1618,7 @@ release_@lapack_func@(GESV_PARAMS_t *params)
}
static void
-@TYPE@_solve(char **args, npy_intp *dimensions, npy_intp *steps,
+@TYPE@_solve(char **args, npy_intp const *dimensions, npy_intp const *steps,
void *NPY_UNUSED(func))
{
GESV_PARAMS_t params;
@@ -1656,7 +1655,7 @@ static void
}
static void
-@TYPE@_solve1(char **args, npy_intp *dimensions, npy_intp *steps,
+@TYPE@_solve1(char **args, npy_intp const *dimensions, npy_intp const *steps,
void *NPY_UNUSED(func))
{
GESV_PARAMS_t params;
@@ -1691,7 +1690,7 @@ static void
}
static void
-@TYPE@_inv(char **args, npy_intp *dimensions, npy_intp *steps,
+@TYPE@_inv(char **args, npy_intp const *dimensions, npy_intp const *steps,
void *NPY_UNUSED(func))
{
GESV_PARAMS_t params;
@@ -1793,7 +1792,7 @@ release_@lapack_func@(POTR_PARAMS_t *params)
}
static void
-@TYPE@_cholesky(char uplo, char **args, npy_intp *dimensions, npy_intp *steps)
+@TYPE@_cholesky(char uplo, char **args, npy_intp const *dimensions, npy_intp const *steps)
{
POTR_PARAMS_t params;
int error_occurred = get_fp_invalid_and_clear();
@@ -1826,7 +1825,7 @@ static void
}
static void
-@TYPE@_cholesky_lo(char **args, npy_intp *dimensions, npy_intp *steps,
+@TYPE@_cholesky_lo(char **args, npy_intp const *dimensions, npy_intp const *steps,
void *NPY_UNUSED(func))
{
@TYPE@_cholesky('L', args, dimensions, steps);
@@ -1842,7 +1841,7 @@ typedef struct geev_params_struct {
void *WR; /* RWORK in complex versions, REAL W buffer for (sd)geev*/
void *WI;
void *VLR; /* REAL VL buffers for _geev where _ is s, d */
- void *VRR; /* REAL VR buffers for _geev hwere _ is s, d */
+ void *VRR; /* REAL VR buffers for _geev where _ is s, d */
void *WORK;
void *W; /* final w */
void *VL; /* final vl */
@@ -2236,8 +2235,8 @@ static NPY_INLINE void
@TYPE@_eig_wrapper(char JOBVL,
char JOBVR,
char**args,
- npy_intp* dimensions,
- npy_intp* steps)
+ npy_intp const *dimensions,
+ npy_intp const *steps)
{
ptrdiff_t outer_steps[4];
size_t iter;
@@ -2330,8 +2329,8 @@ static NPY_INLINE void
static void
@TYPE@_eig(char **args,
- npy_intp *dimensions,
- npy_intp *steps,
+ npy_intp const *dimensions,
+ npy_intp const *steps,
void *NPY_UNUSED(func))
{
@TYPE@_eig_wrapper('N', 'V', args, dimensions, steps);
@@ -2339,8 +2338,8 @@ static void
static void
@TYPE@_eigvals(char **args,
- npy_intp *dimensions,
- npy_intp *steps,
+ npy_intp const *dimensions,
+ npy_intp const *steps,
void *NPY_UNUSED(func))
{
@TYPE@_eig_wrapper('N', 'N', args, dimensions, steps);
@@ -2713,8 +2712,8 @@ release_@lapack_func@(GESDD_PARAMS_t* params)
static NPY_INLINE void
@TYPE@_svd_wrapper(char JOBZ,
char **args,
- npy_intp* dimensions,
- npy_intp* steps)
+ npy_intp const *dimensions,
+ npy_intp const *steps)
{
ptrdiff_t outer_steps[4];
int error_occurred = get_fp_invalid_and_clear();
@@ -2808,8 +2807,8 @@ static NPY_INLINE void
*/
static void
@TYPE@_svd_N(char **args,
- npy_intp *dimensions,
- npy_intp *steps,
+ npy_intp const *dimensions,
+ npy_intp const *steps,
void *NPY_UNUSED(func))
{
@TYPE@_svd_wrapper('N', args, dimensions, steps);
@@ -2817,8 +2816,8 @@ static void
static void
@TYPE@_svd_S(char **args,
- npy_intp *dimensions,
- npy_intp *steps,
+ npy_intp const *dimensions,
+ npy_intp const *steps,
void *NPY_UNUSED(func))
{
@TYPE@_svd_wrapper('S', args, dimensions, steps);
@@ -2826,8 +2825,8 @@ static void
static void
@TYPE@_svd_A(char **args,
- npy_intp *dimensions,
- npy_intp *steps,
+ npy_intp const *dimensions,
+ npy_intp const *steps,
void *NPY_UNUSED(func))
{
@TYPE@_svd_wrapper('A', args, dimensions, steps);
@@ -3164,7 +3163,7 @@ static @basetyp@
}
static void
-@TYPE@_lstsq(char **args, npy_intp *dimensions, npy_intp *steps,
+@TYPE@_lstsq(char **args, npy_intp const *dimensions, npy_intp const *steps,
void *NPY_UNUSED(func))
{
GELSD_PARAMS_t params;
@@ -3591,7 +3590,7 @@ GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = {
}
};
-static void
+static int
addUfuncs(PyObject *dictionary) {
PyObject *f;
int i;
@@ -3610,12 +3609,19 @@ addUfuncs(PyObject *dictionary) {
d->doc,
0,
d->signature);
- PyDict_SetItemString(dictionary, d->name, f);
+ if (f == NULL) {
+ return -1;
+ }
#if 0
dump_ufunc_object((PyUFuncObject*) f);
#endif
+ int ret = PyDict_SetItemString(dictionary, d->name, f);
Py_DECREF(f);
+ if (ret < 0) {
+ return -1;
+ }
}
+ return 0;
}
@@ -3627,7 +3633,6 @@ static PyMethodDef UMath_LinAlgMethods[] = {
{NULL, NULL, 0, NULL} /* Sentinel */
};
-#if defined(NPY_PY3K)
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
UMATH_LINALG_MODULE_NAME,
@@ -3639,48 +3644,41 @@ static struct PyModuleDef moduledef = {
NULL,
NULL
};
-#endif
-#if defined(NPY_PY3K)
-#define RETVAL(x) x
PyObject *PyInit__umath_linalg(void)
-#else
-#define RETVAL(x)
-PyMODINIT_FUNC
-init_umath_linalg(void)
-#endif
{
PyObject *m;
PyObject *d;
PyObject *version;
init_constants();
-#if defined(NPY_PY3K)
m = PyModule_Create(&moduledef);
-#else
- m = Py_InitModule(UMATH_LINALG_MODULE_NAME, UMath_LinAlgMethods);
-#endif
if (m == NULL) {
- return RETVAL(NULL);
+ return NULL;
}
import_array();
import_ufunc();
d = PyModule_GetDict(m);
+ if (d == NULL) {
+ return NULL;
+ }
version = PyString_FromString(umath_linalg_version_string);
- PyDict_SetItemString(d, "__version__", version);
+ if (version == NULL) {
+ return NULL;
+ }
+ int ret = PyDict_SetItemString(d, "__version__", version);
Py_DECREF(version);
+ if (ret < 0) {
+ return NULL;
+ }
/* Load the ufunc operators into the module's namespace */
- addUfuncs(d);
-
- if (PyErr_Occurred()) {
- PyErr_SetString(PyExc_RuntimeError,
- "cannot load _umath_linalg module.");
- return RETVAL(NULL);
+ if (addUfuncs(d) < 0) {
+ return NULL;
}
- return RETVAL(m);
+ return m;
}
diff --git a/numpy/ma/README.txt b/numpy/ma/README.rst
index 47f20d645..47f20d645 100644
--- a/numpy/ma/README.txt
+++ b/numpy/ma/README.rst
diff --git a/numpy/ma/__init__.py b/numpy/ma/__init__.py
index 36ceb1f6e..870cc4ef2 100644
--- a/numpy/ma/__init__.py
+++ b/numpy/ma/__init__.py
@@ -39,8 +39,6 @@ may now proceed to calculate the mean of the other values:
.. moduleauthor:: Jarrod Millman
"""
-from __future__ import division, absolute_import, print_function
-
from . import core
from .core import *
diff --git a/numpy/ma/bench.py b/numpy/ma/bench.py
index a9ba42dea..83cc6aea7 100644
--- a/numpy/ma/bench.py
+++ b/numpy/ma/bench.py
@@ -1,8 +1,6 @@
-#! /usr/bin/env python
+#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-from __future__ import division, print_function
-
import timeit
import numpy
diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index bb3788c9a..fa888107f 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -20,20 +20,13 @@ Released for unlimited redistribution.
"""
# pylint: disable-msg=E1002
-from __future__ import division, absolute_import, print_function
-
-import sys
+import builtins
import operator
import warnings
import textwrap
import re
from functools import reduce
-if sys.version_info[0] >= 3:
- import builtins
-else:
- import __builtin__ as builtins
-
import numpy as np
import numpy.core.umath as umath
import numpy.core.numerictypes as ntypes
@@ -41,7 +34,7 @@ from numpy import ndarray, amax, amin, iscomplexobj, bool_, _NoValue
from numpy import array as narray
from numpy.lib.function_base import angle
from numpy.compat import (
- getargspec, formatargspec, long, basestring, unicode, bytes
+ getargspec, formatargspec, long, unicode, bytes
)
from numpy import expand_dims
from numpy.core.numeric import normalize_axis_tuple
@@ -101,7 +94,7 @@ def _deprecate_argsort_axis(arr):
The array which argsort was called on
np.ma.argsort has a long-term bug where the default of the axis argument
- is wrong (gh-8701), which now must be kept for backwards compatibiity.
+ is wrong (gh-8701), which now must be kept for backwards compatibility.
Thankfully, this only makes a difference when arrays are 2- or more-
dimensional, so we only need a warning then.
"""
@@ -293,10 +286,7 @@ def _extremum_fill_value(obj, extremum, extremum_name):
try:
return extremum[dtype]
except KeyError:
- raise TypeError(
- "Unsuitable type {} for calculating {}."
- .format(dtype, extremum_name)
- )
+ raise TypeError(f"Unsuitable type {dtype} for calculating {extremum_name}.")
dtype = _get_dtype_of(obj)
return _recursive_fill_value(dtype, _scalar_fill_value)
@@ -462,7 +452,7 @@ def _check_fill_value(fill_value, ndtype):
fill_value = np.array(_recursive_set_fill_value(fill_value, ndtype),
dtype=ndtype)
else:
- if isinstance(fill_value, basestring) and (ndtype.char not in 'OSVU'):
+ if isinstance(fill_value, str) and (ndtype.char not in 'OSVU'):
# Note this check doesn't work if fill_value is not a scalar
err_msg = "Cannot set fill value of string with array of dtype %s"
raise TypeError(err_msg % ndtype)
@@ -602,8 +592,10 @@ def filled(a, fill_value=None):
----------
a : MaskedArray or array_like
An input object.
- fill_value : scalar, optional
- Filling value. Default is None.
+    fill_value : array_like, optional
+ Can be scalar or non-scalar. If non-scalar, the
+ resulting filled array should be broadcastable
+        over the input array. Default is None.
Returns
-------
@@ -623,10 +615,19 @@ def filled(a, fill_value=None):
array([[999999, 1, 2],
[999999, 4, 5],
[ 6, 7, 8]])
+ >>> x.filled(fill_value=333)
+ array([[333, 1, 2],
+ [333, 4, 5],
+ [ 6, 7, 8]])
+ >>> x.filled(fill_value=np.arange(3))
+ array([[0, 1, 2],
+ [0, 4, 5],
+ [6, 7, 8]])
"""
if hasattr(a, 'filled'):
return a.filled(fill_value)
+
elif isinstance(a, ndarray):
# Should we check for contiguity ? and a.flags['CONTIGUOUS']:
return a
@@ -776,9 +777,9 @@ def fix_invalid(a, mask=nomask, copy=True, fill_value=None):
return a
def is_string_or_list_of_strings(val):
- return (isinstance(val, basestring) or
+ return (isinstance(val, str) or
(isinstance(val, list) and val and
- builtins.all(isinstance(s, basestring) for s in val)))
+ builtins.all(isinstance(s, str) for s in val)))
###############################################################################
# Ufuncs #
@@ -789,7 +790,7 @@ ufunc_domain = {}
ufunc_fills = {}
-class _DomainCheckInterval(object):
+class _DomainCheckInterval:
"""
Define a valid interval, so that :
@@ -814,7 +815,7 @@ class _DomainCheckInterval(object):
umath.less(x, self.a))
-class _DomainTan(object):
+class _DomainTan:
"""
Define a valid interval for the `tan` function, so that:
@@ -832,7 +833,7 @@ class _DomainTan(object):
return umath.less(umath.absolute(umath.cos(x)), self.eps)
-class _DomainSafeDivide(object):
+class _DomainSafeDivide:
"""
Define a domain for safe division.
@@ -853,7 +854,7 @@ class _DomainSafeDivide(object):
return umath.absolute(a) * self.tolerance >= umath.absolute(b)
-class _DomainGreater(object):
+class _DomainGreater:
"""
DomainGreater(v)(x) is True where x <= v.
@@ -869,7 +870,7 @@ class _DomainGreater(object):
return umath.less_equal(x, self.critical_value)
-class _DomainGreaterEqual(object):
+class _DomainGreaterEqual:
"""
DomainGreaterEqual(v)(x) is True where x < v.
@@ -885,14 +886,14 @@ class _DomainGreaterEqual(object):
return umath.less(x, self.critical_value)
-class _MaskedUFunc(object):
+class _MaskedUFunc:
def __init__(self, ufunc):
self.f = ufunc
self.__doc__ = ufunc.__doc__
self.__name__ = ufunc.__name__
def __str__(self):
- return "Masked version of {}".format(self.f)
+ return f"Masked version of {self.f}"
class _MaskedUnaryOperation(_MaskedUFunc):
@@ -1798,8 +1799,7 @@ def flatten_mask(mask):
try:
for element in sequence:
if hasattr(element, '__iter__'):
- for f in _flatsequence(element):
- yield f
+ yield from _flatsequence(element)
else:
yield element
except TypeError:
@@ -2375,7 +2375,7 @@ def masked_invalid(a, copy=True):
###############################################################################
-class _MaskedPrintOption(object):
+class _MaskedPrintOption:
"""
Handle the string used to represent missing data in a masked array.
@@ -2526,8 +2526,7 @@ def flatten_structured_array(a):
"""
for elm in iter(iterable):
if hasattr(elm, '__iter__'):
- for f in flatten_sequence(elm):
- yield f
+ yield from flatten_sequence(elm)
else:
yield elm
@@ -2593,7 +2592,7 @@ def _arraymethod(funcname, onmask=True):
return wrapped_method
-class MaskedIterator(object):
+class MaskedIterator:
"""
Flat iterator object to iterate over masked arrays.
@@ -2700,8 +2699,6 @@ class MaskedIterator(object):
return masked
return d
- next = __next__
-
class MaskedArray(ndarray):
"""
@@ -2756,6 +2753,52 @@ class MaskedArray(ndarray):
in any order (either C-, Fortran-contiguous, or even discontiguous),
unless a copy is required, in which case it will be C-contiguous.
+ Examples
+ --------
+
+ The ``mask`` can be initialized with an array of boolean values
+ with the same shape as ``data``.
+
+ >>> data = np.arange(6).reshape((2, 3))
+ >>> np.ma.MaskedArray(data, mask=[[False, True, False],
+ ... [False, False, True]])
+ masked_array(
+ data=[[0, --, 2],
+ [3, 4, --]],
+ mask=[[False, True, False],
+ [False, False, True]],
+ fill_value=999999)
+
+ Alternatively, the ``mask`` can be initialized to homogeneous boolean
+ array with the same shape as ``data`` by passing in a scalar
+ boolean value:
+
+ >>> np.ma.MaskedArray(data, mask=False)
+ masked_array(
+ data=[[0, 1, 2],
+ [3, 4, 5]],
+ mask=[[False, False, False],
+ [False, False, False]],
+ fill_value=999999)
+
+ >>> np.ma.MaskedArray(data, mask=True)
+ masked_array(
+ data=[[--, --, --],
+ [--, --, --]],
+ mask=[[ True, True, True],
+ [ True, True, True]],
+ fill_value=999999,
+ dtype=int64)
+
+ .. note::
+ The recommended practice for initializing ``mask`` with a scalar
+ boolean value is to use ``True``/``False`` rather than
+ ``np.True_``/``np.False_``. The reason is :attr:`nomask`
+ is represented internally as ``np.False_``.
+
+ >>> np.False_ is np.ma.nomask
+ True
+
"""
__array_priority__ = 15
@@ -2817,8 +2860,8 @@ class MaskedArray(ndarray):
elif isinstance(data, (tuple, list)):
try:
# If data is a sequence of masked array
- mask = np.array([getmaskarray(m) for m in data],
- dtype=mdtype)
+ mask = np.array([getmaskarray(np.asanyarray(m, dtype=mdtype))
+ for m in data], dtype=mdtype)
except ValueError:
# If data is nested
mask = nomask
@@ -3267,11 +3310,10 @@ class MaskedArray(ndarray):
dout._fill_value.flat[0]).all():
warnings.warn(
"Upon accessing multidimensional field "
- "{indx:s}, need to keep dimensionality "
+ f"{indx!s}, need to keep dimensionality "
"of fill_value at 0. Discarding "
"heterogeneous fill_value and setting "
- "all to {fv!s}.".format(indx=indx,
- fv=dout._fill_value[0]),
+ f"all to {dout._fill_value[0]!s}.",
stacklevel=2)
dout._fill_value = dout._fill_value.flat[0]
dout._isfield = True
@@ -3295,7 +3337,7 @@ class MaskedArray(ndarray):
raise MaskError('Cannot alter the masked element.')
_data = self._data
_mask = self._mask
- if isinstance(indx, basestring):
+ if isinstance(indx, str):
_data[indx] = value
if _mask is nomask:
self._mask = _mask = make_mask_none(self.shape, self.dtype)
@@ -3653,6 +3695,14 @@ class MaskedArray(ndarray):
@fill_value.setter
def fill_value(self, value=None):
target = _check_fill_value(value, self.dtype)
+ if not target.ndim == 0:
+ # 2019-11-12, 1.18.0
+ warnings.warn(
+ "Non-scalar arrays for the fill value are deprecated. Use "
+ "arrays with scalar values instead. The filled function "
+ "still supports any array as `fill_value`.",
+ DeprecationWarning, stacklevel=2)
+
_fill_value = self._fill_value
if _fill_value is None:
# Create the attribute if it was undefined
@@ -3673,9 +3723,11 @@ class MaskedArray(ndarray):
Parameters
----------
- fill_value : scalar, optional
- The value to use for invalid entries (None by default).
- If None, the `fill_value` attribute of the array is used instead.
+ fill_value : array_like, optional
+ The value to use for invalid entries. Can be scalar or non-scalar.
+ If non-scalar, the resulting ndarray must be broadcastable over
+            input array. Default is None, in which case the `fill_value`
+ attribute of the array is used instead.
Returns
-------
@@ -3694,6 +3746,8 @@ class MaskedArray(ndarray):
>>> x = np.ma.array([1,2,3,4,5], mask=[0,0,1,0,1], fill_value=-999)
>>> x.filled()
array([ 1, 2, -999, 4, -999])
+ >>> x.filled(fill_value=1000)
+ array([ 1, 2, 1000, 4, 1000])
>>> type(x.filled())
<class 'numpy.ndarray'>
@@ -3874,10 +3928,6 @@ class MaskedArray(ndarray):
def __str__(self):
return str(self._insert_masked_print())
- if sys.version_info.major < 3:
- def __unicode__(self):
- return unicode(self._insert_masked_print())
-
def __repr__(self):
"""
Literal string representation.
@@ -3907,7 +3957,7 @@ class MaskedArray(ndarray):
)
return _legacy_print_templates[key] % parameters
- prefix = 'masked_{}('.format(name)
+ prefix = f"masked_{name}("
dtype_needed = (
not np.core.arrayprint.dtype_is_implied(self.dtype) or
@@ -4321,17 +4371,6 @@ class MaskedArray(ndarray):
raise MaskError('Cannot convert masked element to a Python int.')
return int(self.item())
- def __long__(self):
- """
- Convert to long.
- """
- if self.size > 1:
- raise TypeError("Only length-1 arrays can be converted "
- "to Python scalars")
- elif self._mask:
- raise MaskError('Cannot convert masked element to a Python long.')
- return long(self.item())
-
@property
def imag(self):
"""
@@ -4394,7 +4433,7 @@ class MaskedArray(ndarray):
----------
axis : None or int or tuple of ints, optional
Axis or axes along which the count is performed.
- The default (`axis` = `None`) performs the count over all
+ The default, None, performs the count over all
the dimensions of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
@@ -4727,7 +4766,7 @@ class MaskedArray(ndarray):
>>> x = np.ma.array([1, 2, 3])
>>> x.ids()
- (166691080, 3083169284L) # may vary
+ (166691080, 3083169284) # may vary
"""
if self._mask is nomask:
@@ -4774,7 +4813,7 @@ class MaskedArray(ndarray):
See Also
--------
- ndarray.all : corresponding function for ndarrays
+ numpy.ndarray.all : corresponding function for ndarrays
numpy.all : equivalent function
Examples
@@ -4812,7 +4851,7 @@ class MaskedArray(ndarray):
See Also
--------
- ndarray.any : corresponding function for ndarrays
+ numpy.ndarray.any : corresponding function for ndarrays
numpy.any : equivalent function
"""
@@ -4866,7 +4905,7 @@ class MaskedArray(ndarray):
flatnonzero :
Return indices that are non-zero in the flattened version of the input
array.
- ndarray.nonzero :
+ numpy.ndarray.nonzero :
Equivalent ndarray method.
count_nonzero :
Counts the number of non-zero elements in the input array.
@@ -4994,7 +5033,7 @@ class MaskedArray(ndarray):
See Also
--------
- ndarray.sum : corresponding function for ndarrays
+ numpy.ndarray.sum : corresponding function for ndarrays
numpy.sum : equivalent function
Examples
@@ -5065,7 +5104,7 @@ class MaskedArray(ndarray):
See Also
--------
- ndarray.cumsum : corresponding function for ndarrays
+ numpy.ndarray.cumsum : corresponding function for ndarrays
numpy.cumsum : equivalent function
Examples
@@ -5102,7 +5141,7 @@ class MaskedArray(ndarray):
See Also
--------
- ndarray.prod : corresponding function for ndarrays
+ numpy.ndarray.prod : corresponding function for ndarrays
numpy.prod : equivalent function
"""
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
@@ -5148,7 +5187,7 @@ class MaskedArray(ndarray):
See Also
--------
- ndarray.cumprod : corresponding function for ndarrays
+ numpy.ndarray.cumprod : corresponding function for ndarrays
numpy.cumprod : equivalent function
"""
result = self.filled(1).cumprod(axis=axis, dtype=dtype, out=out)
@@ -5171,7 +5210,7 @@ class MaskedArray(ndarray):
See Also
--------
- ndarray.mean : corresponding function for ndarrays
+ numpy.ndarray.mean : corresponding function for ndarrays
numpy.mean : Equivalent function
numpy.ma.average: Weighted average.
@@ -5260,7 +5299,7 @@ class MaskedArray(ndarray):
See Also
--------
- ndarray.var : corresponding function for ndarrays
+ numpy.ndarray.var : corresponding function for ndarrays
numpy.var : Equivalent function
"""
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
@@ -5323,7 +5362,7 @@ class MaskedArray(ndarray):
See Also
--------
- ndarray.std : corresponding function for ndarrays
+ numpy.ndarray.std : corresponding function for ndarrays
numpy.std : Equivalent function
"""
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
@@ -5344,7 +5383,7 @@ class MaskedArray(ndarray):
See Also
--------
- ndarray.around : corresponding function for ndarrays
+ numpy.ndarray.around : corresponding function for ndarrays
numpy.around : equivalent function
"""
result = self._data.round(decimals=decimals, out=out).view(type(self))
@@ -5406,7 +5445,7 @@ class MaskedArray(ndarray):
--------
MaskedArray.sort : Describes sorting algorithms used.
lexsort : Indirect stable sort with multiple keys.
- ndarray.sort : Inplace sort.
+ numpy.ndarray.sort : Inplace sort.
Notes
-----
@@ -5558,7 +5597,7 @@ class MaskedArray(ndarray):
See Also
--------
- ndarray.sort : Method to sort an array in-place.
+ numpy.ndarray.sort : Method to sort an array in-place.
argsort : Indirect sort.
lexsort : Indirect stable sort on multiple keys.
searchsorted : Find elements in a sorted array.
@@ -5850,13 +5889,13 @@ class MaskedArray(ndarray):
def partition(self, *args, **kwargs):
warnings.warn("Warning: 'partition' will ignore the 'mask' "
- "of the {}.".format(self.__class__.__name__),
+ f"of the {self.__class__.__name__}.",
stacklevel=2)
return super(MaskedArray, self).partition(*args, **kwargs)
def argpartition(self, *args, **kwargs):
warnings.warn("Warning: 'argpartition' will ignore the 'mask' "
- "of the {}.".format(self.__class__.__name__),
+ f"of the {self.__class__.__name__}.",
stacklevel=2)
return super(MaskedArray, self).argpartition(*args, **kwargs)
@@ -5948,10 +5987,17 @@ class MaskedArray(ndarray):
return result.tolist()
def tostring(self, fill_value=None, order='C'):
+ r"""
+ A compatibility alias for `tobytes`, with exactly the same behavior.
+
+ Despite its name, it returns `bytes` not `str`\ s.
+
+ .. deprecated:: 1.19.0
"""
- This function is a compatibility alias for tobytes. Despite its name it
- returns bytes not strings.
- """
+ # 2020-03-30, Numpy 1.19.0
+ warnings.warn(
+ "tostring() is deprecated. Use tobytes() instead.",
+ DeprecationWarning, stacklevel=2)
return self.tobytes(fill_value, order=order)
@@ -5978,7 +6024,7 @@ class MaskedArray(ndarray):
See Also
--------
- ndarray.tobytes
+ numpy.ndarray.tobytes
tolist, tofile
Notes
@@ -6202,8 +6248,7 @@ class mvoid(MaskedArray):
"Defines an iterator for mvoid"
(_data, _mask) = (self._data, self._mask)
if _mask is nomask:
- for d in _data:
- yield d
+ yield from _data
else:
for (d, m) in zip(_data, _mask):
if m:
@@ -6220,9 +6265,11 @@ class mvoid(MaskedArray):
Parameters
----------
- fill_value : scalar, optional
- The value to use for invalid entries (None by default).
- If None, the `fill_value` attribute is used instead.
+ fill_value : array_like, optional
+ The value to use for invalid entries. Can be scalar or
+            non-scalar. If the latter is the case, the filled array should
+ be broadcastable over input array. Default is None, in
+ which case the `fill_value` attribute is used instead.
Returns
-------
@@ -6371,10 +6418,6 @@ class MaskedConstant(MaskedArray):
def __str__(self):
return str(masked_print_option._display)
- if sys.version_info.major < 3:
- def __unicode__(self):
- return unicode(masked_print_option._display)
-
def __repr__(self):
if self is MaskedConstant.__singleton:
return 'masked'
@@ -6382,6 +6425,21 @@ class MaskedConstant(MaskedArray):
# it's a subclass, or something is wrong, make it obvious
return object.__repr__(self)
+ def __format__(self, format_spec):
+ # Replace ndarray.__format__ with the default, which supports no format characters.
+ # Supporting format characters is unwise here, because we do not know what type
+ # the user was expecting - better to not guess.
+ try:
+ return object.__format__(self, format_spec)
+ except TypeError:
+ # 2020-03-23, NumPy 1.19.0
+ warnings.warn(
+ "Format strings passed to MaskedConstant are ignored, but in future may "
+ "error or produce different behavior",
+ FutureWarning, stacklevel=2
+ )
+ return object.__format__(self, "")
+
def __reduce__(self):
"""Override of MaskedArray's __reduce__.
"""
@@ -6418,7 +6476,7 @@ class MaskedConstant(MaskedArray):
return super(MaskedConstant, self).__setattr__(attr, value)
elif self is self.__singleton:
raise AttributeError(
- "attributes of {!r} are not writeable".format(self))
+ f"attributes of {self!r} are not writeable")
else:
# duplicate instance - we can end up here from __array_finalize__,
# where we set the __class__ attribute
@@ -6523,8 +6581,8 @@ class _extrema_operation(_MaskedUFunc):
if b is None:
# 2016-04-13, 1.13.0
warnings.warn(
- "Single-argument form of np.ma.{0} is deprecated. Use "
- "np.ma.{0}.reduce instead.".format(self.__name__),
+ f"Single-argument form of np.ma.{self.__name__} is deprecated. Use "
+ f"np.ma.{self.__name__}.reduce instead.",
DeprecationWarning, stacklevel=2)
return self.reduce(a)
return where(self.compare(a, b), a, b)
@@ -6537,11 +6595,9 @@ class _extrema_operation(_MaskedUFunc):
if axis is np._NoValue and target.ndim > 1:
# 2017-05-06, Numpy 1.13.0: warn on axis default
warnings.warn(
- "In the future the default for ma.{0}.reduce will be axis=0, "
- "not the current None, to match np.{0}.reduce. "
- "Explicitly pass 0 or None to silence this warning.".format(
- self.__name__
- ),
+ f"In the future the default for ma.{self.__name__}.reduce will be axis=0, "
+ f"not the current None, to match np.{self.__name__}.reduce. "
+ "Explicitly pass 0 or None to silence this warning.",
MaskedArrayFutureWarning, stacklevel=2)
axis = None
@@ -6621,7 +6677,7 @@ ptp.__doc__ = MaskedArray.ptp.__doc__
##############################################################################
-class _frommethod(object):
+class _frommethod:
"""
Define functions from existing MaskedArray methods.
@@ -7880,10 +7936,8 @@ def asanyarray(a, dtype=None):
def _pickle_warn(method):
# NumPy 1.15.0, 2017-12-10
warnings.warn(
- "np.ma.{method} is deprecated, use pickle.{method} instead"
- .format(method=method),
- DeprecationWarning,
- stacklevel=3)
+ f"np.ma.{method} is deprecated, use pickle.{method} instead",
+ DeprecationWarning, stacklevel=3)
def fromfile(file, dtype=float, count=-1, sep=''):
@@ -7955,7 +8009,7 @@ def fromflex(fxarray):
return masked_array(fxarray['_data'], mask=fxarray['_mask'])
-class _convert2ma(object):
+class _convert2ma:
"""
Convert functions from numpy to numpy.ma.
diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py
index de1aa3af8..31648fb2e 100644
--- a/numpy/ma/extras.py
+++ b/numpy/ma/extras.py
@@ -8,8 +8,6 @@ A collection of utilities for `numpy.ma`.
:version: $Id: extras.py 3473 2007-10-29 15:18:13Z jarrod.millman $
"""
-from __future__ import division, absolute_import, print_function
-
__all__ = [
'apply_along_axis', 'apply_over_axes', 'atleast_1d', 'atleast_2d',
'atleast_3d', 'average', 'clump_masked', 'clump_unmasked',
@@ -214,7 +212,7 @@ def masked_all_like(arr):
#####--------------------------------------------------------------------------
#---- --- Standard functions ---
#####--------------------------------------------------------------------------
-class _fromnxfunction(object):
+class _fromnxfunction:
"""
Defines a wrapper to adapt NumPy functions to masked arrays.
@@ -542,7 +540,7 @@ def average(a, axis=None, weights=None, returned=False):
Data to be averaged.
Masked entries are not taken into account in the computation.
axis : int, optional
- Axis along which to average `a`. If `None`, averaging is done over
+ Axis along which to average `a`. If None, averaging is done over
the flattened array.
weights : array_like, optional
The importance that each element has in the computation of the average.
@@ -937,7 +935,7 @@ def compress_cols(a):
raise NotImplementedError("compress_cols works for 2D arrays only.")
return compress_rowcols(a, 1)
-def mask_rows(a, axis=None):
+def mask_rows(a, axis=np._NoValue):
"""
Mask rows of a 2D array that contain masked values.
@@ -979,9 +977,15 @@ def mask_rows(a, axis=None):
fill_value=1)
"""
+ if axis is not np._NoValue:
+ # remove the axis argument when this deprecation expires
+ # NumPy 1.18.0, 2019-11-28
+ warnings.warn(
+ "The axis argument has always been ignored, in future passing it "
+ "will raise TypeError", DeprecationWarning, stacklevel=2)
return mask_rowcols(a, 0)
-def mask_cols(a, axis=None):
+def mask_cols(a, axis=np._NoValue):
"""
Mask columns of a 2D array that contain masked values.
@@ -1022,6 +1026,12 @@ def mask_cols(a, axis=None):
fill_value=1)
"""
+ if axis is not np._NoValue:
+ # remove the axis argument when this deprecation expires
+ # NumPy 1.18.0, 2019-11-28
+ warnings.warn(
+ "The axis argument has always been ignored, in future passing it "
+ "will raise TypeError", DeprecationWarning, stacklevel=2)
return mask_rowcols(a, 1)
diff --git a/numpy/ma/mrecords.py b/numpy/ma/mrecords.py
index 826fb0f64..cd93a9a14 100644
--- a/numpy/ma/mrecords.py
+++ b/numpy/ma/mrecords.py
@@ -8,18 +8,14 @@ and the masking of individual fields.
.. moduleauthor:: Pierre Gerard-Marchant
"""
-from __future__ import division, absolute_import, print_function
-
# We should make sure that no field is called '_mask','mask','_fieldmask',
# or whatever restricted keywords. An idea would be to no bother in the
# first place, and then rename the invalid fields with a trailing
# underscore. Maybe we could just overload the parser function ?
-import sys
import warnings
import numpy as np
-from numpy.compat import basestring
from numpy import (
bool_, dtype, ndarray, recarray, array as narray
)
@@ -87,7 +83,7 @@ def _get_fieldmask(self):
return fdmask
-class MaskedRecords(MaskedArray, object):
+class MaskedRecords(MaskedArray):
"""
Attributes
@@ -260,8 +256,7 @@ class MaskedRecords(MaskedArray, object):
fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
optinfo = ndarray.__getattribute__(self, '_optinfo') or {}
if not (attr in fielddict or attr in optinfo):
- exctype, value = sys.exc_info()[:2]
- raise exctype(value)
+ raise
else:
# Get the list of names
fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
@@ -306,7 +301,7 @@ class MaskedRecords(MaskedArray, object):
_mask = ndarray.__getattribute__(self, '_mask')
_data = ndarray.view(self, _localdict['_baseclass'])
# We want a field
- if isinstance(indx, basestring):
+ if isinstance(indx, str):
# Make sure _sharedmask is True to propagate back to _fieldmask
# Don't use _set_mask, there are some copies being made that
# break propagation Don't force the mask to nomask, that wreaks
@@ -333,7 +328,7 @@ class MaskedRecords(MaskedArray, object):
"""
MaskedArray.__setitem__(self, indx, value)
- if isinstance(indx, basestring):
+ if isinstance(indx, str):
self._mask[indx] = ma.getmaskarray(value)
def __str__(self):
diff --git a/numpy/ma/setup.py b/numpy/ma/setup.py
index d1d6c89b5..144a961c2 100644
--- a/numpy/ma/setup.py
+++ b/numpy/ma/setup.py
@@ -1,6 +1,4 @@
-#!/usr/bin/env python
-from __future__ import division, print_function
-
+#!/usr/bin/env python3
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('ma', parent_package, top_path)
diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py
index b72ce56aa..98fc7dd97 100644
--- a/numpy/ma/tests/test_core.py
+++ b/numpy/ma/tests/test_core.py
@@ -4,8 +4,6 @@
:author: Pierre Gerard-Marchant
:contact: pierregm_at_uga_dot_edu
"""
-from __future__ import division, absolute_import, print_function
-
__author__ = "Pierre GF Gerard-Marchant"
import sys
@@ -65,7 +63,7 @@ num_dts = [np.dtype(dt_) for dt_ in '?bhilqBHILQefdgFD']
num_ids = [dt_.char for dt_ in num_dts]
-class TestMaskedArray(object):
+class TestMaskedArray:
# Base test class for MaskedArrays.
def setup(self):
@@ -449,6 +447,21 @@ class TestMaskedArray(object):
assert_equal(copied.mask, [0, 0, 0])
assert_equal(a.mask, [0, 1, 0])
+ def test_format(self):
+ a = array([0, 1, 2], mask=[False, True, False])
+ assert_equal(format(a), "[0 -- 2]")
+ assert_equal(format(masked), "--")
+ assert_equal(format(masked, ""), "--")
+
+ # Postponed from PR #15410, perhaps address in the future.
+ # assert_equal(format(masked, " >5"), " --")
+ # assert_equal(format(masked, " <5"), "-- ")
+
+ # Expect a FutureWarning for using format_spec with MaskedElement
+ with assert_warns(FutureWarning):
+ with_format_string = format(masked, " >5")
+ assert_equal(with_format_string, "--")
+
def test_str_repr(self):
a = array([0, 1, 2], mask=[False, True, False])
assert_equal(str(a), '[0 -- 2]')
@@ -936,7 +949,7 @@ class TestMaskedArray(object):
def test_object_with_array(self):
mx1 = masked_array([1.], mask=[True])
mx2 = masked_array([1., 2.])
- mx = masked_array([mx1, mx2], mask=[False, True])
+ mx = masked_array([mx1, mx2], mask=[False, True], dtype=object)
assert_(mx[0] is mx1)
assert_(mx[1] is not mx2)
assert_(np.all(mx[1].data == mx2.data))
@@ -946,7 +959,7 @@ class TestMaskedArray(object):
assert_(mx2[0] == 0.)
-class TestMaskedArrayArithmetic(object):
+class TestMaskedArrayArithmetic:
# Base test class for MaskedArrays.
def setup(self):
@@ -1558,7 +1571,11 @@ class TestMaskedArrayArithmetic(object):
assert_equal(test.mask, [True, True])
assert_(test.fill_value == True)
- # test = (a[0] == b) # doesn't work in Python2
+ test = (a[0] == b)
+ assert_equal(test.data, [False, False])
+ assert_equal(test.mask, [True, False])
+ assert_(test.fill_value == True)
+
test = (b == a[0])
assert_equal(test.data, [False, False])
assert_equal(test.mask, [True, False])
@@ -1586,7 +1603,11 @@ class TestMaskedArrayArithmetic(object):
assert_equal(test.mask, [True, True])
assert_(test.fill_value == True)
- # test = (a[0] != b) # doesn't work in Python2
+ test = (a[0] != b)
+ assert_equal(test.data, [True, True])
+ assert_equal(test.mask, [True, False])
+ assert_(test.fill_value == True)
+
test = (b != a[0])
assert_equal(test.data, [True, True])
assert_equal(test.mask, [True, False])
@@ -1615,7 +1636,11 @@ class TestMaskedArrayArithmetic(object):
assert_equal(test.mask, [True, True])
assert_(test.fill_value == True)
- # test = (a[0] == b) # doesn't work in Python2
+ test = (a[0] == b)
+ assert_equal(test.data, [False, False])
+ assert_equal(test.mask, [True, False])
+ assert_(test.fill_value == True)
+
test = (b == a[0])
assert_equal(test.data, [False, False])
assert_equal(test.mask, [True, False])
@@ -1644,7 +1669,11 @@ class TestMaskedArrayArithmetic(object):
assert_equal(test.mask, [True, True])
assert_(test.fill_value == True)
- # test = (a[0] != b) # doesn't work in Python2
+ test = (a[0] != b)
+ assert_equal(test.data, [True, True])
+ assert_equal(test.mask, [True, False])
+ assert_(test.fill_value == True)
+
test = (b != a[0])
assert_equal(test.data, [True, True])
assert_equal(test.mask, [True, False])
@@ -1715,7 +1744,7 @@ class TestMaskedArrayArithmetic(object):
assert_equal(a.mask, [0, 0, 0, 0, 1])
-class TestMaskedArrayAttributes(object):
+class TestMaskedArrayAttributes:
def test_keepmask(self):
# Tests the keep mask flag
@@ -1891,7 +1920,7 @@ class TestMaskedArrayAttributes(object):
assert_equal(m._mask, np.ma.nomask)
-class TestFillingValues(object):
+class TestFillingValues:
def test_check_on_scalar(self):
# Test _check_fill_value set to valid and invalid values
@@ -2229,7 +2258,7 @@ class TestFillingValues(object):
assert_equal(a["f1"].fill_value, default_fill_value("eggs"))
-class TestUfuncs(object):
+class TestUfuncs:
# Test class for the application of ufuncs on MaskedArrays.
def setup(self):
@@ -2309,7 +2338,7 @@ class TestUfuncs(object):
assert_raises(TypeError, operator.mul, a, "abc")
assert_raises(TypeError, operator.truediv, a, "abc")
- class MyClass(object):
+ class MyClass:
__array_priority__ = a.__array_priority__ + 1
def __mul__(self, other):
@@ -2323,7 +2352,7 @@ class TestUfuncs(object):
assert_(a * me == "My rmul")
# and that __array_priority__ is respected
- class MyClass2(object):
+ class MyClass2:
__array_priority__ = 100
def __mul__(self, other):
@@ -2373,7 +2402,7 @@ class TestUfuncs(object):
# also check that allclose uses ma ufuncs, to avoid warning
allclose(m, 0.5)
-class TestMaskedArrayInPlaceArithmetics(object):
+class TestMaskedArrayInPlaceArithmetics:
# Test MaskedArray Arithmetics
def setup(self):
@@ -2875,7 +2904,7 @@ class TestMaskedArrayInPlaceArithmetics(object):
assert_equal(len(w), 0, "Failed on type=%s." % t)
-class TestMaskedArrayMethods(object):
+class TestMaskedArrayMethods:
# Test class for miscellaneous MaskedArrays methods.
def setup(self):
# Base data definition.
@@ -3582,7 +3611,7 @@ class TestMaskedArrayMethods(object):
assert_equal(xd.data, x.diagonal().data)
-class TestMaskedArrayMathMethods(object):
+class TestMaskedArrayMathMethods:
def setup(self):
# Base data definition.
@@ -3860,7 +3889,7 @@ class TestMaskedArrayMathMethods(object):
assert_equal(a.max(1), [3, 6])
-class TestMaskedArrayMathMethodsComplex(object):
+class TestMaskedArrayMathMethodsComplex:
# Test class for miscellaneous MaskedArrays methods.
def setup(self):
# Base data definition.
@@ -3913,7 +3942,7 @@ class TestMaskedArrayMathMethodsComplex(object):
mX[:, k].compressed().std())
-class TestMaskedArrayFunctions(object):
+class TestMaskedArrayFunctions:
# Test class for miscellaneous functions.
def setup(self):
@@ -4552,7 +4581,7 @@ class TestMaskedArrayFunctions(object):
assert_equal(test, masked_equal([-1, -1, -1, -1, -1], -1))
-class TestMaskedFields(object):
+class TestMaskedFields:
def setup(self):
ilist = [1, 2, 3, 4, 5]
@@ -4714,7 +4743,7 @@ class TestMaskedFields(object):
assert_equal(len(rec), len(self.data['ddtype']))
-class TestMaskedObjectArray(object):
+class TestMaskedObjectArray:
def test_getitem(self):
arr = np.ma.array([None, None])
@@ -4762,7 +4791,7 @@ class TestMaskedObjectArray(object):
assert_(arr[0] is np.ma.masked)
-class TestMaskedView(object):
+class TestMaskedView:
def setup(self):
iterator = list(zip(np.arange(10), np.random.rand(10)))
@@ -4840,7 +4869,7 @@ class TestMaskedView(object):
assert_(not isinstance(test, MaskedArray))
-class TestOptionalArgs(object):
+class TestOptionalArgs:
def test_ndarrayfuncs(self):
# test axis arg behaves the same as ndarray (including multiple axes)
@@ -4927,7 +4956,7 @@ class TestOptionalArgs(object):
assert_raises(np.AxisError, count, np.ma.array(1), axis=1)
-class TestMaskedConstant(object):
+class TestMaskedConstant:
def _do_add_test(self, add):
# sanity check
assert_(add(np.ma.masked, 1) is np.ma.masked)
@@ -5008,11 +5037,6 @@ class TestMaskedConstant(object):
assert_raises(MaskError, operator.setitem, a_i, (), np.ma.masked)
assert_raises(MaskError, int, np.ma.masked)
- @pytest.mark.skipif(sys.version_info.major == 3,
- reason="long doesn't exist in Python 3")
- def test_coercion_long(self):
- assert_raises(MaskError, long, np.ma.masked)
-
def test_coercion_float(self):
a_f = np.zeros((), float)
assert_warns(UserWarning, operator.setitem, a_f, (), np.ma.masked)
@@ -5044,7 +5068,7 @@ class TestMaskedConstant(object):
assert_raises(AttributeError, setattr, np.ma.masked, 'dtype', np.int64)
-class TestMaskedWhereAliases(object):
+class TestMaskedWhereAliases:
# TODO: Test masked_object, masked_equal, ...
diff --git a/numpy/ma/tests/test_deprecations.py b/numpy/ma/tests/test_deprecations.py
index 72cc29aa0..14f697375 100644
--- a/numpy/ma/tests/test_deprecations.py
+++ b/numpy/ma/tests/test_deprecations.py
@@ -1,14 +1,12 @@
"""Test deprecation and future warnings.
"""
-from __future__ import division, absolute_import, print_function
-
import numpy as np
from numpy.testing import assert_warns
from numpy.ma.testutils import assert_equal
from numpy.ma.core import MaskedArrayFutureWarning
-class TestArgsort(object):
+class TestArgsort:
""" gh-8701 """
def _test_base(self, argsort, cls):
arr_0d = np.array(1).view(cls)
@@ -37,7 +35,7 @@ class TestArgsort(object):
return self._test_base(np.ma.MaskedArray.argsort, np.ma.MaskedArray)
-class TestMinimumMaximum(object):
+class TestMinimumMaximum:
def test_minimum(self):
assert_warns(DeprecationWarning, np.ma.minimum, np.ma.array([1, 2]))
diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py
index 836770378..1c8610625 100644
--- a/numpy/ma/tests/test_extras.py
+++ b/numpy/ma/tests/test_extras.py
@@ -7,10 +7,9 @@ Adapted from the original test_ma by Pierre Gerard-Marchant
:version: $Id: test_extras.py 3473 2007-10-29 15:18:13Z jarrod.millman $
"""
-from __future__ import division, absolute_import, print_function
-
import warnings
import itertools
+import pytest
import numpy as np
from numpy.testing import (
@@ -33,7 +32,7 @@ from numpy.ma.extras import (
)
-class TestGeneric(object):
+class TestGeneric:
#
def test_masked_all(self):
# Tests masked_all
@@ -141,7 +140,7 @@ class TestGeneric(object):
assert_equal(test, [])
-class TestAverage(object):
+class TestAverage:
# Several tests of average. Why so many ? Good point...
def test_testAverage1(self):
# Test of average.
@@ -272,7 +271,7 @@ class TestAverage(object):
assert_almost_equal(wav1.imag, expected1.imag)
-class TestConcatenator(object):
+class TestConcatenator:
# Tests for mr_, the equivalent of r_ for masked arrays.
def test_1d(self):
@@ -316,7 +315,7 @@ class TestConcatenator(object):
assert_equal(actual.data[:2], [1, 2])
-class TestNotMasked(object):
+class TestNotMasked:
# Tests notmasked_edges and notmasked_contiguous.
def test_edges(self):
@@ -386,7 +385,7 @@ class TestNotMasked(object):
])
-class TestCompressFunctions(object):
+class TestCompressFunctions:
def test_compress_nd(self):
# Tests compress_nd
@@ -552,6 +551,18 @@ class TestCompressFunctions(object):
assert_(mask_rowcols(x, 0).mask.all())
assert_(mask_rowcols(x, 1).mask.all())
+ @pytest.mark.parametrize("axis", [None, 0, 1])
+ @pytest.mark.parametrize(["func", "rowcols_axis"],
+ [(np.ma.mask_rows, 0), (np.ma.mask_cols, 1)])
+ def test_mask_row_cols_axis_deprecation(self, axis, func, rowcols_axis):
+ # Test deprecation of the axis argument to `mask_rows` and `mask_cols`
+ x = array(np.arange(9).reshape(3, 3),
+ mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]])
+
+ with assert_warns(DeprecationWarning):
+ res = func(x, axis=axis)
+ assert_equal(res, mask_rowcols(x, rowcols_axis))
+
def test_dot(self):
# Tests dot product
n = np.arange(1, 7)
@@ -639,7 +650,7 @@ class TestCompressFunctions(object):
assert_equal(a, res)
-class TestApplyAlongAxis(object):
+class TestApplyAlongAxis:
# Tests 2D functions
def test_3d(self):
a = arange(12.).reshape(2, 2, 3)
@@ -661,7 +672,7 @@ class TestApplyAlongAxis(object):
assert_equal(xa, [[2, 5], [8, 11]])
-class TestApplyOverAxes(object):
+class TestApplyOverAxes:
# Tests apply_over_axes
def test_basic(self):
a = arange(24).reshape(2, 3, 4)
@@ -674,7 +685,7 @@ class TestApplyOverAxes(object):
assert_equal(test, ctrl)
-class TestMedian(object):
+class TestMedian:
def test_pytype(self):
r = np.ma.median([[np.inf, np.inf], [np.inf, np.inf]], axis=-1)
assert_equal(r, np.inf)
@@ -1053,7 +1064,7 @@ class TestMedian(object):
assert_(type(np.ma.median(o.astype(object))), float)
-class TestCov(object):
+class TestCov:
def setup(self):
self.data = array(np.random.rand(12))
@@ -1120,7 +1131,7 @@ class TestCov(object):
x.shape[0] / frac))
-class TestCorrcoef(object):
+class TestCorrcoef:
def setup(self):
self.data = array(np.random.rand(12))
@@ -1227,7 +1238,7 @@ class TestCorrcoef(object):
control[:-1, :-1])
-class TestPolynomial(object):
+class TestPolynomial:
#
def test_polyfit(self):
# Tests polyfit
@@ -1285,7 +1296,7 @@ class TestPolynomial(object):
assert_almost_equal(a, a_)
-class TestArraySetOps(object):
+class TestArraySetOps:
def test_unique_onlist(self):
# Test unique on list
@@ -1517,7 +1528,7 @@ class TestArraySetOps(object):
assert_array_equal(setdiff1d(a, b), np.array(['c']))
-class TestShapeBase(object):
+class TestShapeBase:
def test_atleast_2d(self):
# Test atleast_2d
@@ -1573,7 +1584,7 @@ class TestShapeBase(object):
assert_equal(b.mask.shape, b.data.shape)
-class TestStack(object):
+class TestStack:
def test_stack_1d(self):
a = masked_array([0, 1, 2], mask=[0, 1, 0])
diff --git a/numpy/ma/tests/test_mrecords.py b/numpy/ma/tests/test_mrecords.py
index 94e772d55..c2f859273 100644
--- a/numpy/ma/tests/test_mrecords.py
+++ b/numpy/ma/tests/test_mrecords.py
@@ -5,8 +5,6 @@
:contact: pierregm_at_uga_dot_edu
"""
-from __future__ import division, absolute_import, print_function
-
import numpy as np
import numpy.ma as ma
from numpy import recarray
@@ -26,7 +24,7 @@ from numpy.ma.testutils import (
from numpy.compat import pickle
-class TestMRecords(object):
+class TestMRecords:
ilist = [1, 2, 3, 4, 5]
flist = [1.1, 2.2, 3.3, 4.4, 5.5]
@@ -348,7 +346,7 @@ class TestMRecords(object):
dtype=mult.dtype))
-class TestView(object):
+class TestView:
def setup(self):
(a, b) = (np.arange(10), np.random.rand(10))
@@ -386,7 +384,7 @@ class TestView(object):
##############################################################################
-class TestMRecordsImport(object):
+class TestMRecordsImport:
_a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int)
_b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float)
diff --git a/numpy/ma/tests/test_old_ma.py b/numpy/ma/tests/test_old_ma.py
index 7100eccbb..96c7e3609 100644
--- a/numpy/ma/tests/test_old_ma.py
+++ b/numpy/ma/tests/test_old_ma.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
from functools import reduce
import numpy as np
@@ -33,7 +31,7 @@ def eq(v, w, msg=''):
return result
-class TestMa(object):
+class TestMa:
def setup(self):
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
@@ -700,7 +698,7 @@ class TestMa(object):
assert_equal(b[1].shape, ())
-class TestUfuncs(object):
+class TestUfuncs:
def setup(self):
self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6),
array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),)
@@ -765,7 +763,7 @@ class TestUfuncs(object):
assert_(eq(nonzero(x), [0]))
-class TestArrayMethods(object):
+class TestArrayMethods:
def setup(self):
x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
diff --git a/numpy/ma/tests/test_regression.py b/numpy/ma/tests/test_regression.py
index b83873a5a..7e76eb054 100644
--- a/numpy/ma/tests/test_regression.py
+++ b/numpy/ma/tests/test_regression.py
@@ -1,12 +1,10 @@
-from __future__ import division, absolute_import, print_function
-
import numpy as np
from numpy.testing import (
assert_, assert_array_equal, assert_allclose, suppress_warnings
)
-class TestRegression(object):
+class TestRegression:
def test_masked_array_create(self):
# Ticket #17
x = np.ma.masked_array([0, 1, 2, 3, 0, 4, 5, 6],
@@ -88,6 +86,6 @@ class TestRegression(object):
ma = np.ma.MaskedArray([(1, 1.), (2, 2.), (3, 3.)], dtype='i4,f4')
assert_array_equal(ma[[]], ma[:0])
- def test_masked_array_tostring_fortran(self):
+ def test_masked_array_tobytes_fortran(self):
ma = np.ma.arange(4).reshape((2,2))
- assert_array_equal(ma.tostring(order='F'), ma.T.tostring())
+ assert_array_equal(ma.tobytes(order='F'), ma.T.tobytes())
diff --git a/numpy/ma/tests/test_subclassing.py b/numpy/ma/tests/test_subclassing.py
index 440b36722..caa746740 100644
--- a/numpy/ma/tests/test_subclassing.py
+++ b/numpy/ma/tests/test_subclassing.py
@@ -6,8 +6,6 @@
:version: $Id: test_subclassing.py 3473 2007-10-29 15:18:13Z jarrod.millman $
"""
-from __future__ import division, absolute_import, print_function
-
import numpy as np
from numpy.testing import assert_, assert_raises
from numpy.ma.testutils import assert_equal
@@ -80,7 +78,7 @@ msubarray = MSubArray
# and overrides __array_wrap__, updating the info dict, to check that this
# doesn't get destroyed by MaskedArray._update_from. But this one also needs
# its own iterator...
-class CSAIterator(object):
+class CSAIterator:
"""
Flat iterator object that uses its own setter/getter
(works around ndarray.flat not propagating subclass setters/getters
@@ -107,8 +105,6 @@ class CSAIterator(object):
def __next__(self):
return next(self._dataiter).__array__().view(type(self._original))
- next = __next__
-
class ComplicatedSubArray(SubArray):
@@ -154,7 +150,7 @@ class ComplicatedSubArray(SubArray):
return obj
-class TestSubclassing(object):
+class TestSubclassing:
# Test suite for masked subclasses of ndarray.
def setup(self):
diff --git a/numpy/ma/testutils.py b/numpy/ma/testutils.py
index c0deaa9f4..51ab03948 100644
--- a/numpy/ma/testutils.py
+++ b/numpy/ma/testutils.py
@@ -5,8 +5,6 @@
:version: $Id: testutils.py 3529 2007-11-13 08:01:14Z jarrod.millman $
"""
-from __future__ import division, absolute_import, print_function
-
import operator
import numpy as np
diff --git a/numpy/ma/timer_comparison.py b/numpy/ma/timer_comparison.py
index 4ad635e38..83bd7852e 100644
--- a/numpy/ma/timer_comparison.py
+++ b/numpy/ma/timer_comparison.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import timeit
from functools import reduce
@@ -15,7 +13,7 @@ np.seterr(all='ignore')
pi = np.pi
-class ModuleTester(object):
+class ModuleTester:
def __init__(self, module):
self.module = module
self.allequal = module.allequal
diff --git a/numpy/matlib.py b/numpy/matlib.py
index 604ef470b..bd6b63289 100644
--- a/numpy/matlib.py
+++ b/numpy/matlib.py
@@ -1,9 +1,20 @@
-from __future__ import division, absolute_import, print_function
+import warnings
+
+# 2018-05-29, PendingDeprecationWarning added to matrix.__new__
+# 2020-01-23, numpy 1.19.0 PendingDeprecationWarning
+warnings.warn("Importing from numpy.matlib is deprecated since 1.19.0. "
+ "The matrix subclass is not the recommended way to represent "
+ "matrices or deal with linear algebra (see "
+ "https://docs.scipy.org/doc/numpy/user/numpy-for-matlab-users.html). "
+ "Please adjust your code to use regular ndarray. ",
+ PendingDeprecationWarning, stacklevel=2)
import numpy as np
from numpy.matrixlib.defmatrix import matrix, asmatrix
-# need * as we're copying the numpy namespace (FIXME: this makes little sense)
-from numpy import *
+# Matlib.py contains all functions in the numpy namespace with a few
+# replacements. See doc/source/reference/routines.matlib.rst for details.
+# Need * as we're copying the numpy namespace.
+from numpy import * # noqa: F403
__version__ = np.__version__
@@ -239,7 +250,7 @@ def rand(*args):
See Also
--------
- randn, numpy.random.rand
+ randn, numpy.random.RandomState.rand
Examples
--------
@@ -285,7 +296,7 @@ def randn(*args):
See Also
--------
- rand, random.randn
+ rand, numpy.random.RandomState.randn
Notes
-----
diff --git a/numpy/matrixlib/__init__.py b/numpy/matrixlib/__init__.py
index 777e0cd33..54154d11f 100644
--- a/numpy/matrixlib/__init__.py
+++ b/numpy/matrixlib/__init__.py
@@ -1,8 +1,6 @@
"""Sub-package containing the matrix class and related functions.
"""
-from __future__ import division, absolute_import, print_function
-
from .defmatrix import *
__all__ = defmatrix.__all__
diff --git a/numpy/matrixlib/defmatrix.py b/numpy/matrixlib/defmatrix.py
index 3c7e8ffc2..12ac74cb2 100644
--- a/numpy/matrixlib/defmatrix.py
+++ b/numpy/matrixlib/defmatrix.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
__all__ = ['matrix', 'bmat', 'mat', 'asmatrix']
import sys
@@ -1046,7 +1044,7 @@ def bmat(obj, ldict=None, gdict=None):
referenced by name.
ldict : dict, optional
A dictionary that replaces local operands in current frame.
- Ignored if `obj` is not a string or `gdict` is `None`.
+ Ignored if `obj` is not a string or `gdict` is None.
gdict : dict, optional
A dictionary that replaces global operands in current frame.
Ignored if `obj` is not a string.
diff --git a/numpy/matrixlib/setup.py b/numpy/matrixlib/setup.py
index d0981d658..529d2a2eb 100644
--- a/numpy/matrixlib/setup.py
+++ b/numpy/matrixlib/setup.py
@@ -1,6 +1,4 @@
-#!/usr/bin/env python
-from __future__ import division, print_function
-
+#!/usr/bin/env python3
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('matrixlib', parent_package, top_path)
diff --git a/numpy/matrixlib/tests/test_defmatrix.py b/numpy/matrixlib/tests/test_defmatrix.py
index aa6e08d64..4cb5f3a37 100644
--- a/numpy/matrixlib/tests/test_defmatrix.py
+++ b/numpy/matrixlib/tests/test_defmatrix.py
@@ -1,11 +1,4 @@
-from __future__ import division, absolute_import, print_function
-
-try:
- # Accessing collections abstract classes from collections
- # has been deprecated since Python 3.3
- import collections.abc as collections_abc
-except ImportError:
- import collections as collections_abc
+import collections.abc
import numpy as np
from numpy import matrix, asmatrix, bmat
@@ -16,7 +9,7 @@ from numpy.testing import (
from numpy.linalg import matrix_power
from numpy.matrixlib import mat
-class TestCtor(object):
+class TestCtor:
def test_basic(self):
A = np.array([[1, 2], [3, 4]])
mA = matrix(A)
@@ -63,7 +56,7 @@ class TestCtor(object):
assert_(np.all(b2 == mixresult))
-class TestProperties(object):
+class TestProperties:
def test_sum(self):
"""Test whether matrix.sum(axis=1) preserves orientation.
Fails in NumPy <= 0.9.6.2127.
@@ -196,7 +189,7 @@ class TestProperties(object):
B = matrix([[True], [True], [False]])
assert_array_equal(A, B)
-class TestCasting(object):
+class TestCasting:
def test_basic(self):
A = np.arange(100).reshape(10, 10)
mA = matrix(A)
@@ -215,7 +208,7 @@ class TestCasting(object):
assert_(np.all(mA != mB))
-class TestAlgebra(object):
+class TestAlgebra:
def test_basic(self):
import numpy.linalg as linalg
@@ -274,7 +267,7 @@ class TestAlgebra(object):
A*object()
-class TestMatrixReturn(object):
+class TestMatrixReturn:
def test_instance_methods(self):
a = matrix([1.0], dtype='f8')
methodargs = {
@@ -299,7 +292,7 @@ class TestMatrixReturn(object):
if attrib.startswith('_') or attrib in excluded_methods:
continue
f = getattr(a, attrib)
- if isinstance(f, collections_abc.Callable):
+ if isinstance(f, collections.abc.Callable):
# reset contents of a
a.astype('f8')
a.fill(1.0)
@@ -316,7 +309,7 @@ class TestMatrixReturn(object):
assert_(type(d) is np.ndarray)
-class TestIndexing(object):
+class TestIndexing:
def test_basic(self):
x = asmatrix(np.zeros((3, 2), float))
y = np.zeros((3, 1), float)
@@ -325,7 +318,7 @@ class TestIndexing(object):
assert_equal(x, [[0, 1], [0, 0], [0, 0]])
-class TestNewScalarIndexing(object):
+class TestNewScalarIndexing:
a = matrix([[1, 2], [3, 4]])
def test_dimesions(self):
@@ -392,7 +385,7 @@ class TestNewScalarIndexing(object):
assert_array_equal(x[[2, 1, 0],:], x[::-1,:])
-class TestPower(object):
+class TestPower:
def test_returntype(self):
a = np.array([[0, 1], [0, 0]])
assert_(type(matrix_power(a, 2)) is np.ndarray)
@@ -403,7 +396,7 @@ class TestPower(object):
assert_array_equal(matrix_power([[0, 1], [0, 0]], 2), [[0, 0], [0, 0]])
-class TestShape(object):
+class TestShape:
a = np.array([[1], [2]])
m = matrix([[1], [2]])
diff --git a/numpy/matrixlib/tests/test_interaction.py b/numpy/matrixlib/tests/test_interaction.py
index 088ae3c6a..5154bd621 100644
--- a/numpy/matrixlib/tests/test_interaction.py
+++ b/numpy/matrixlib/tests/test_interaction.py
@@ -2,8 +2,6 @@
Note that tests with MaskedArray and linalg are done in separate files.
"""
-from __future__ import division, absolute_import, print_function
-
import pytest
import textwrap
@@ -290,7 +288,7 @@ def test_kron_matrix():
assert_equal(type(np.kron(m, a)), np.matrix)
-class TestConcatenatorMatrix(object):
+class TestConcatenatorMatrix:
# 2018-04-29: moved here from core.tests.test_index_tricks.
def test_matrix(self):
a = [1, 2]
@@ -326,24 +324,17 @@ class TestConcatenatorMatrix(object):
def test_array_equal_error_message_matrix():
# 2018-04-29: moved here from testing.tests.test_utils.
- try:
+ with pytest.raises(AssertionError) as exc_info:
assert_equal(np.array([1, 2]), np.matrix([1, 2]))
- except AssertionError as e:
- msg = str(e)
- msg2 = msg.replace("shapes (2L,), (1L, 2L)", "shapes (2,), (1, 2)")
- msg_reference = textwrap.dedent("""\
-
- Arrays are not equal
-
- (shapes (2,), (1, 2) mismatch)
- x: array([1, 2])
- y: matrix([[1, 2]])""")
- try:
- assert_equal(msg, msg_reference)
- except AssertionError:
- assert_equal(msg2, msg_reference)
- else:
- raise AssertionError("Did not raise")
+ msg = str(exc_info.value)
+ msg_reference = textwrap.dedent("""\
+
+ Arrays are not equal
+
+ (shapes (2,), (1, 2) mismatch)
+ x: array([1, 2])
+ y: matrix([[1, 2]])""")
+ assert_equal(msg, msg_reference)
def test_array_almost_equal_matrix():
diff --git a/numpy/matrixlib/tests/test_masked_matrix.py b/numpy/matrixlib/tests/test_masked_matrix.py
index d3911d2e1..45424ecf0 100644
--- a/numpy/matrixlib/tests/test_masked_matrix.py
+++ b/numpy/matrixlib/tests/test_masked_matrix.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import numpy as np
from numpy.ma.testutils import (assert_, assert_equal, assert_raises,
assert_array_equal)
@@ -29,7 +27,7 @@ class MMatrix(MaskedArray, np.matrix,):
return _view
-class TestMaskedMatrix(object):
+class TestMaskedMatrix:
def test_matrix_indexing(self):
# Tests conversions and indexing
x1 = np.matrix([[1, 2, 3], [4, 3, 2]])
@@ -171,7 +169,7 @@ class TestMaskedMatrix(object):
assert_(not isinstance(test, MaskedArray))
-class TestSubclassing(object):
+class TestSubclassing:
# Test suite for masked subclasses of ndarray.
def setup(self):
@@ -212,7 +210,7 @@ class TestSubclassing(object):
assert_(isinstance(divide(mx, x), MMatrix))
assert_equal(divide(mx, mx), divide(xmx, xmx))
-class TestConcatenator(object):
+class TestConcatenator:
# Tests for mr_, the equivalent of r_ for masked arrays.
def test_matrix_builder(self):
diff --git a/numpy/matrixlib/tests/test_matrix_linalg.py b/numpy/matrixlib/tests/test_matrix_linalg.py
index 6fc733c2e..106c2e382 100644
--- a/numpy/matrixlib/tests/test_matrix_linalg.py
+++ b/numpy/matrixlib/tests/test_matrix_linalg.py
@@ -1,6 +1,4 @@
""" Test functions for linalg module using the matrix class."""
-from __future__ import division, absolute_import, print_function
-
import numpy as np
from numpy.linalg.tests.test_linalg import (
diff --git a/numpy/matrixlib/tests/test_multiarray.py b/numpy/matrixlib/tests/test_multiarray.py
index 6d84bd477..638d0d153 100644
--- a/numpy/matrixlib/tests/test_multiarray.py
+++ b/numpy/matrixlib/tests/test_multiarray.py
@@ -1,9 +1,7 @@
-from __future__ import division, absolute_import, print_function
-
import numpy as np
from numpy.testing import assert_, assert_equal, assert_array_equal
-class TestView(object):
+class TestView:
def test_type(self):
x = np.array([1, 2, 3])
assert_(isinstance(x.view(np.matrix), np.matrix))
diff --git a/numpy/matrixlib/tests/test_numeric.py b/numpy/matrixlib/tests/test_numeric.py
index 95e1c8001..a772bb388 100644
--- a/numpy/matrixlib/tests/test_numeric.py
+++ b/numpy/matrixlib/tests/test_numeric.py
@@ -1,9 +1,7 @@
-from __future__ import division, absolute_import, print_function
-
import numpy as np
from numpy.testing import assert_equal
-class TestDot(object):
+class TestDot:
def test_matscalar(self):
b1 = np.matrix(np.ones((3, 3), dtype=complex))
assert_equal(b1*1.0, b1)
diff --git a/numpy/matrixlib/tests/test_regression.py b/numpy/matrixlib/tests/test_regression.py
index 70e147279..a54d44020 100644
--- a/numpy/matrixlib/tests/test_regression.py
+++ b/numpy/matrixlib/tests/test_regression.py
@@ -1,10 +1,8 @@
-from __future__ import division, absolute_import, print_function
-
import numpy as np
from numpy.testing import assert_, assert_equal, assert_raises
-class TestRegression(object):
+class TestRegression:
def test_kron_matrix(self):
# Ticket #71
x = np.matrix('[1 0; 1 0]')
diff --git a/numpy/polynomial/__init__.py b/numpy/polynomial/__init__.py
index 85cee9ce6..4ff2df57e 100644
--- a/numpy/polynomial/__init__.py
+++ b/numpy/polynomial/__init__.py
@@ -13,8 +13,6 @@ implemented as operations on the coefficients. Additional (module-specific)
information can be found in the docstring for the module of interest.
"""
-from __future__ import division, absolute_import, print_function
-
from .polynomial import Polynomial
from .chebyshev import Chebyshev
from .legendre import Legendre
diff --git a/numpy/polynomial/_polybase.py b/numpy/polynomial/_polybase.py
index bfa030714..53efbb90f 100644
--- a/numpy/polynomial/_polybase.py
+++ b/numpy/polynomial/_polybase.py
@@ -6,8 +6,6 @@ for the various polynomial classes. It operates as a mixin, but uses the
abc module from the stdlib, hence it is only available for Python >= 2.6.
"""
-from __future__ import division, absolute_import, print_function
-
import abc
import numbers
@@ -279,18 +277,16 @@ class ABCPolyBase(abc.ABC):
self.window = window
def __repr__(self):
- format = "%s(%s, domain=%s, window=%s)"
coef = repr(self.coef)[6:-1]
domain = repr(self.domain)[6:-1]
window = repr(self.window)[6:-1]
name = self.__class__.__name__
- return format % (name, coef, domain, window)
+ return f"{name}({coef}, domain={domain}, window={window})"
def __str__(self):
- format = "%s(%s)"
coef = str(self.coef)
name = self.nickname
- return format % (name, coef)
+ return f"{name}({coef})"
@classmethod
def _repr_latex_term(cls, i, arg_str, needs_parens):
@@ -299,9 +295,7 @@ class ABCPolyBase(abc.ABC):
"Subclasses must define either a basis name, or override "
"_repr_latex_term(i, arg_str, needs_parens)")
# since we always add parens, we don't care if the expression needs them
- return "{{{basis}}}_{{{i}}}({arg_str})".format(
- basis=cls.basis_name, i=i, arg_str=arg_str
- )
+ return f"{{{cls.basis_name}}}_{{{i}}}({arg_str})"
@staticmethod
def _repr_latex_scalar(x):
@@ -316,19 +310,15 @@ class ABCPolyBase(abc.ABC):
term = 'x'
needs_parens = False
elif scale == 1:
- term = '{} + x'.format(
- self._repr_latex_scalar(off)
- )
+ term = f"{self._repr_latex_scalar(off)} + x"
needs_parens = True
elif off == 0:
- term = '{}x'.format(
- self._repr_latex_scalar(scale)
- )
+ term = f"{self._repr_latex_scalar(scale)}x"
needs_parens = True
else:
- term = '{} + {}x'.format(
- self._repr_latex_scalar(off),
- self._repr_latex_scalar(scale)
+ term = (
+ f"{self._repr_latex_scalar(off)} + "
+ f"{self._repr_latex_scalar(scale)}x"
)
needs_parens = True
@@ -338,20 +328,20 @@ class ABCPolyBase(abc.ABC):
for i, c in enumerate(self.coef):
# prevent duplication of + and - signs
if i == 0:
- coef_str = '{}'.format(self._repr_latex_scalar(c))
+ coef_str = f"{self._repr_latex_scalar(c)}"
elif not isinstance(c, numbers.Real):
- coef_str = ' + ({})'.format(self._repr_latex_scalar(c))
+ coef_str = f" + ({self._repr_latex_scalar(c)})"
elif not np.signbit(c):
- coef_str = ' + {}'.format(self._repr_latex_scalar(c))
+ coef_str = f" + {self._repr_latex_scalar(c)}"
else:
- coef_str = ' - {}'.format(self._repr_latex_scalar(-c))
+ coef_str = f" - {self._repr_latex_scalar(-c)}"
# produce the string for the term
term_str = self._repr_latex_term(i, term, needs_parens)
if term_str == '1':
part = coef_str
else:
- part = r'{}\,{}'.format(coef_str, term_str)
+ part = rf"{coef_str}\,{term_str}"
if c == 0:
part = mute(part)
@@ -364,7 +354,7 @@ class ABCPolyBase(abc.ABC):
# in case somehow there are no coefficients at all
body = '0'
- return r'$x \mapsto {}$'.format(body)
+ return rf"$x \mapsto {body}$"
@@ -425,17 +415,15 @@ class ABCPolyBase(abc.ABC):
return NotImplemented
return self.__class__(coef, self.domain, self.window)
- def __div__(self, other):
- # this can be removed when python 2 support is dropped.
- return self.__floordiv__(other)
-
def __truediv__(self, other):
# there is no true divide if the rhs is not a Number, although it
# could return the first n elements of an infinite series.
# It is hard to see where n would come from, though.
if not isinstance(other, numbers.Number) or isinstance(other, bool):
- form = "unsupported types for true division: '%s', '%s'"
- raise TypeError(form % (type(self), type(other)))
+ raise TypeError(
+ f"unsupported types for true division: "
+ f"'{type(self)}', '{type(other)}'"
+ )
return self.__floordiv__(other)
def __floordiv__(self, other):
diff --git a/numpy/polynomial/chebyshev.py b/numpy/polynomial/chebyshev.py
index 093eb0048..1329ba07d 100644
--- a/numpy/polynomial/chebyshev.py
+++ b/numpy/polynomial/chebyshev.py
@@ -1,5 +1,7 @@
"""
-Objects for dealing with Chebyshev series.
+====================================================
+Chebyshev Series (:mod:`numpy.polynomial.chebyshev`)
+====================================================
This module provides a number of objects (mostly functions) useful for
dealing with Chebyshev series, including a `Chebyshev` class that
@@ -7,57 +9,75 @@ encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
+Classes
+-------
+
+.. autosummary::
+ :toctree: generated/
+
+ Chebyshev
+
+
Constants
---------
-- `chebdomain` -- Chebyshev series default domain, [-1,1].
-- `chebzero` -- (Coefficients of the) Chebyshev series that evaluates
- identically to 0.
-- `chebone` -- (Coefficients of the) Chebyshev series that evaluates
- identically to 1.
-- `chebx` -- (Coefficients of the) Chebyshev series for the identity map,
- ``f(x) = x``.
+
+.. autosummary::
+ :toctree: generated/
+
+ chebdomain
+ chebzero
+ chebone
+ chebx
Arithmetic
----------
-- `chebadd` -- add two Chebyshev series.
-- `chebsub` -- subtract one Chebyshev series from another.
-- `chebmulx` -- multiply a Chebyshev series in ``P_i(x)`` by ``x``.
-- `chebmul` -- multiply two Chebyshev series.
-- `chebdiv` -- divide one Chebyshev series by another.
-- `chebpow` -- raise a Chebyshev series to a positive integer power.
-- `chebval` -- evaluate a Chebyshev series at given points.
-- `chebval2d` -- evaluate a 2D Chebyshev series at given points.
-- `chebval3d` -- evaluate a 3D Chebyshev series at given points.
-- `chebgrid2d` -- evaluate a 2D Chebyshev series on a Cartesian product.
-- `chebgrid3d` -- evaluate a 3D Chebyshev series on a Cartesian product.
+
+.. autosummary::
+ :toctree: generated/
+
+ chebadd
+ chebsub
+ chebmulx
+ chebmul
+ chebdiv
+ chebpow
+ chebval
+ chebval2d
+ chebval3d
+ chebgrid2d
+ chebgrid3d
Calculus
--------
-- `chebder` -- differentiate a Chebyshev series.
-- `chebint` -- integrate a Chebyshev series.
+
+.. autosummary::
+ :toctree: generated/
+
+ chebder
+ chebint
Misc Functions
--------------
-- `chebfromroots` -- create a Chebyshev series with specified roots.
-- `chebroots` -- find the roots of a Chebyshev series.
-- `chebvander` -- Vandermonde-like matrix for Chebyshev polynomials.
-- `chebvander2d` -- Vandermonde-like matrix for 2D power series.
-- `chebvander3d` -- Vandermonde-like matrix for 3D power series.
-- `chebgauss` -- Gauss-Chebyshev quadrature, points and weights.
-- `chebweight` -- Chebyshev weight function.
-- `chebcompanion` -- symmetrized companion matrix in Chebyshev form.
-- `chebfit` -- least-squares fit returning a Chebyshev series.
-- `chebpts1` -- Chebyshev points of the first kind.
-- `chebpts2` -- Chebyshev points of the second kind.
-- `chebtrim` -- trim leading coefficients from a Chebyshev series.
-- `chebline` -- Chebyshev series representing given straight line.
-- `cheb2poly` -- convert a Chebyshev series to a polynomial.
-- `poly2cheb` -- convert a polynomial to a Chebyshev series.
-- `chebinterpolate` -- interpolate a function at the Chebyshev points.
-Classes
--------
-- `Chebyshev` -- A Chebyshev series class.
+.. autosummary::
+ :toctree: generated/
+
+ chebfromroots
+ chebroots
+ chebvander
+ chebvander2d
+ chebvander3d
+ chebgauss
+ chebweight
+ chebcompanion
+ chebfit
+ chebpts1
+ chebpts2
+ chebtrim
+ chebline
+ cheb2poly
+ poly2cheb
+ chebinterpolate
See also
--------
@@ -87,9 +107,6 @@ References
(preprint: https://www.math.hmc.edu/~benjamin/papers/CombTrig.pdf, pg. 4)
"""
-from __future__ import division, absolute_import, print_function
-
-import warnings
import numpy as np
import numpy.linalg as la
from numpy.core.multiarray import normalize_axis_index
@@ -1060,7 +1077,6 @@ def chebint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
if n > 1:
tmp[2] = c[1]/4
for j in range(2, n):
- t = c[j]/(2*j + 1) # FIXME: t never used
tmp[j + 1] = c[j]/(2*(j + 1))
tmp[j - 1] -= c[j]/(2*(j - 1))
tmp[0] += k[i] - chebval(lbnd, tmp)
@@ -1468,7 +1484,7 @@ def chebvander2d(x, y, deg):
.. versionadded:: 1.7.0
"""
- return pu._vander2d(chebvander, x, y, deg)
+ return pu._vander_nd_flat((chebvander, chebvander), (x, y), deg)
def chebvander3d(x, y, z, deg):
@@ -1522,7 +1538,7 @@ def chebvander3d(x, y, z, deg):
.. versionadded:: 1.7.0
"""
- return pu._vander3d(chebvander, x, y, z, deg)
+ return pu._vander_nd_flat((chebvander, chebvander, chebvander), (x, y, z), deg)
def chebfit(x, y, deg, rcond=None, full=False, w=None):
diff --git a/numpy/polynomial/hermite.py b/numpy/polynomial/hermite.py
index 0011fa3b7..44b26f5ee 100644
--- a/numpy/polynomial/hermite.py
+++ b/numpy/polynomial/hermite.py
@@ -1,5 +1,7 @@
"""
-Objects for dealing with Hermite series.
+==============================================================
+Hermite Series, "Physicists" (:mod:`numpy.polynomial.hermite`)
+==============================================================
This module provides a number of objects (mostly functions) useful for
dealing with Hermite series, including a `Hermite` class that
@@ -7,60 +9,72 @@ encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
+Classes
+-------
+.. autosummary::
+ :toctree: generated/
+
+ Hermite
+
Constants
---------
-- `hermdomain` -- Hermite series default domain, [-1,1].
-- `hermzero` -- Hermite series that evaluates identically to 0.
-- `hermone` -- Hermite series that evaluates identically to 1.
-- `hermx` -- Hermite series for the identity map, ``f(x) = x``.
+.. autosummary::
+ :toctree: generated/
+
+ hermdomain
+ hermzero
+ hermone
+ hermx
Arithmetic
----------
-- `hermadd` -- add two Hermite series.
-- `hermsub` -- subtract one Hermite series from another.
-- `hermmulx` -- multiply a Hermite series in ``P_i(x)`` by ``x``.
-- `hermmul` -- multiply two Hermite series.
-- `hermdiv` -- divide one Hermite series by another.
-- `hermpow` -- raise a Hermite series to a positive integer power.
-- `hermval` -- evaluate a Hermite series at given points.
-- `hermval2d` -- evaluate a 2D Hermite series at given points.
-- `hermval3d` -- evaluate a 3D Hermite series at given points.
-- `hermgrid2d` -- evaluate a 2D Hermite series on a Cartesian product.
-- `hermgrid3d` -- evaluate a 3D Hermite series on a Cartesian product.
+.. autosummary::
+ :toctree: generated/
+
+ hermadd
+ hermsub
+ hermmulx
+ hermmul
+ hermdiv
+ hermpow
+ hermval
+ hermval2d
+ hermval3d
+ hermgrid2d
+ hermgrid3d
Calculus
--------
-- `hermder` -- differentiate a Hermite series.
-- `hermint` -- integrate a Hermite series.
+.. autosummary::
+ :toctree: generated/
+
+ hermder
+ hermint
Misc Functions
--------------
-- `hermfromroots` -- create a Hermite series with specified roots.
-- `hermroots` -- find the roots of a Hermite series.
-- `hermvander` -- Vandermonde-like matrix for Hermite polynomials.
-- `hermvander2d` -- Vandermonde-like matrix for 2D power series.
-- `hermvander3d` -- Vandermonde-like matrix for 3D power series.
-- `hermgauss` -- Gauss-Hermite quadrature, points and weights.
-- `hermweight` -- Hermite weight function.
-- `hermcompanion` -- symmetrized companion matrix in Hermite form.
-- `hermfit` -- least-squares fit returning a Hermite series.
-- `hermtrim` -- trim leading coefficients from a Hermite series.
-- `hermline` -- Hermite series of given straight line.
-- `herm2poly` -- convert a Hermite series to a polynomial.
-- `poly2herm` -- convert a polynomial to a Hermite series.
-
-Classes
--------
-- `Hermite` -- A Hermite series class.
+.. autosummary::
+ :toctree: generated/
+
+ hermfromroots
+ hermroots
+ hermvander
+ hermvander2d
+ hermvander3d
+ hermgauss
+ hermweight
+ hermcompanion
+ hermfit
+ hermtrim
+ hermline
+ herm2poly
+ poly2herm
See also
--------
`numpy.polynomial`
"""
-from __future__ import division, absolute_import, print_function
-
-import warnings
import numpy as np
import numpy.linalg as la
from numpy.core.multiarray import normalize_axis_index
@@ -1193,7 +1207,7 @@ def hermvander2d(x, y, deg):
.. versionadded:: 1.7.0
"""
- return pu._vander2d(hermvander, x, y, deg)
+ return pu._vander_nd_flat((hermvander, hermvander), (x, y), deg)
def hermvander3d(x, y, z, deg):
@@ -1247,7 +1261,7 @@ def hermvander3d(x, y, z, deg):
.. versionadded:: 1.7.0
"""
- return pu._vander3d(hermvander, x, y, z, deg)
+ return pu._vander_nd_flat((hermvander, hermvander, hermvander), (x, y, z), deg)
def hermfit(x, y, deg, rcond=None, full=False, w=None):
diff --git a/numpy/polynomial/hermite_e.py b/numpy/polynomial/hermite_e.py
index b1cc2d3ab..1a18843ec 100644
--- a/numpy/polynomial/hermite_e.py
+++ b/numpy/polynomial/hermite_e.py
@@ -1,5 +1,7 @@
"""
-Objects for dealing with Hermite_e series.
+===================================================================
+HermiteE Series, "Probabilists" (:mod:`numpy.polynomial.hermite_e`)
+===================================================================
This module provides a number of objects (mostly functions) useful for
dealing with Hermite_e series, including a `HermiteE` class that
@@ -7,60 +9,72 @@ encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
+Classes
+-------
+.. autosummary::
+ :toctree: generated/
+
+ HermiteE
+
Constants
---------
-- `hermedomain` -- Hermite_e series default domain, [-1,1].
-- `hermezero` -- Hermite_e series that evaluates identically to 0.
-- `hermeone` -- Hermite_e series that evaluates identically to 1.
-- `hermex` -- Hermite_e series for the identity map, ``f(x) = x``.
+.. autosummary::
+ :toctree: generated/
+
+ hermedomain
+ hermezero
+ hermeone
+ hermex
Arithmetic
----------
-- `hermeadd` -- add two Hermite_e series.
-- `hermesub` -- subtract one Hermite_e series from another.
-- `hermemulx` -- multiply a Hermite_e series in ``P_i(x)`` by ``x``.
-- `hermemul` -- multiply two Hermite_e series.
-- `hermediv` -- divide one Hermite_e series by another.
-- `hermepow` -- raise a Hermite_e series to a positive integer power.
-- `hermeval` -- evaluate a Hermite_e series at given points.
-- `hermeval2d` -- evaluate a 2D Hermite_e series at given points.
-- `hermeval3d` -- evaluate a 3D Hermite_e series at given points.
-- `hermegrid2d` -- evaluate a 2D Hermite_e series on a Cartesian product.
-- `hermegrid3d` -- evaluate a 3D Hermite_e series on a Cartesian product.
+.. autosummary::
+ :toctree: generated/
+
+ hermeadd
+ hermesub
+ hermemulx
+ hermemul
+ hermediv
+ hermepow
+ hermeval
+ hermeval2d
+ hermeval3d
+ hermegrid2d
+ hermegrid3d
Calculus
--------
-- `hermeder` -- differentiate a Hermite_e series.
-- `hermeint` -- integrate a Hermite_e series.
+.. autosummary::
+ :toctree: generated/
+
+ hermeder
+ hermeint
Misc Functions
--------------
-- `hermefromroots` -- create a Hermite_e series with specified roots.
-- `hermeroots` -- find the roots of a Hermite_e series.
-- `hermevander` -- Vandermonde-like matrix for Hermite_e polynomials.
-- `hermevander2d` -- Vandermonde-like matrix for 2D power series.
-- `hermevander3d` -- Vandermonde-like matrix for 3D power series.
-- `hermegauss` -- Gauss-Hermite_e quadrature, points and weights.
-- `hermeweight` -- Hermite_e weight function.
-- `hermecompanion` -- symmetrized companion matrix in Hermite_e form.
-- `hermefit` -- least-squares fit returning a Hermite_e series.
-- `hermetrim` -- trim leading coefficients from a Hermite_e series.
-- `hermeline` -- Hermite_e series of given straight line.
-- `herme2poly` -- convert a Hermite_e series to a polynomial.
-- `poly2herme` -- convert a polynomial to a Hermite_e series.
-
-Classes
--------
-- `HermiteE` -- A Hermite_e series class.
+.. autosummary::
+ :toctree: generated/
+
+ hermefromroots
+ hermeroots
+ hermevander
+ hermevander2d
+ hermevander3d
+ hermegauss
+ hermeweight
+ hermecompanion
+ hermefit
+ hermetrim
+ hermeline
+ herme2poly
+ poly2herme
See also
--------
`numpy.polynomial`
"""
-from __future__ import division, absolute_import, print_function
-
-import warnings
import numpy as np
import numpy.linalg as la
from numpy.core.multiarray import normalize_axis_index
@@ -1186,7 +1200,7 @@ def hermevander2d(x, y, deg):
.. versionadded:: 1.7.0
"""
- return pu._vander2d(hermevander, x, y, deg)
+ return pu._vander_nd_flat((hermevander, hermevander), (x, y), deg)
def hermevander3d(x, y, z, deg):
@@ -1240,7 +1254,7 @@ def hermevander3d(x, y, z, deg):
.. versionadded:: 1.7.0
"""
- return pu._vander3d(hermevander, x, y, z, deg)
+ return pu._vander_nd_flat((hermevander, hermevander, hermevander), (x, y, z), deg)
def hermefit(x, y, deg, rcond=None, full=False, w=None):
diff --git a/numpy/polynomial/laguerre.py b/numpy/polynomial/laguerre.py
index 7e7e45ca1..89bb8e168 100644
--- a/numpy/polynomial/laguerre.py
+++ b/numpy/polynomial/laguerre.py
@@ -1,5 +1,7 @@
"""
-Objects for dealing with Laguerre series.
+==================================================
+Laguerre Series (:mod:`numpy.polynomial.laguerre`)
+==================================================
This module provides a number of objects (mostly functions) useful for
dealing with Laguerre series, including a `Laguerre` class that
@@ -7,60 +9,72 @@ encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
+Classes
+-------
+.. autosummary::
+ :toctree: generated/
+
+ Laguerre
+
Constants
---------
-- `lagdomain` -- Laguerre series default domain, [-1,1].
-- `lagzero` -- Laguerre series that evaluates identically to 0.
-- `lagone` -- Laguerre series that evaluates identically to 1.
-- `lagx` -- Laguerre series for the identity map, ``f(x) = x``.
+.. autosummary::
+ :toctree: generated/
+
+ lagdomain
+ lagzero
+ lagone
+ lagx
Arithmetic
----------
-- `lagadd` -- add two Laguerre series.
-- `lagsub` -- subtract one Laguerre series from another.
-- `lagmulx` -- multiply a Laguerre series in ``P_i(x)`` by ``x``.
-- `lagmul` -- multiply two Laguerre series.
-- `lagdiv` -- divide one Laguerre series by another.
-- `lagpow` -- raise a Laguerre series to a positive integer power.
-- `lagval` -- evaluate a Laguerre series at given points.
-- `lagval2d` -- evaluate a 2D Laguerre series at given points.
-- `lagval3d` -- evaluate a 3D Laguerre series at given points.
-- `laggrid2d` -- evaluate a 2D Laguerre series on a Cartesian product.
-- `laggrid3d` -- evaluate a 3D Laguerre series on a Cartesian product.
+.. autosummary::
+ :toctree: generated/
+
+ lagadd
+ lagsub
+ lagmulx
+ lagmul
+ lagdiv
+ lagpow
+ lagval
+ lagval2d
+ lagval3d
+ laggrid2d
+ laggrid3d
Calculus
--------
-- `lagder` -- differentiate a Laguerre series.
-- `lagint` -- integrate a Laguerre series.
+.. autosummary::
+ :toctree: generated/
+
+ lagder
+ lagint
Misc Functions
--------------
-- `lagfromroots` -- create a Laguerre series with specified roots.
-- `lagroots` -- find the roots of a Laguerre series.
-- `lagvander` -- Vandermonde-like matrix for Laguerre polynomials.
-- `lagvander2d` -- Vandermonde-like matrix for 2D power series.
-- `lagvander3d` -- Vandermonde-like matrix for 3D power series.
-- `laggauss` -- Gauss-Laguerre quadrature, points and weights.
-- `lagweight` -- Laguerre weight function.
-- `lagcompanion` -- symmetrized companion matrix in Laguerre form.
-- `lagfit` -- least-squares fit returning a Laguerre series.
-- `lagtrim` -- trim leading coefficients from a Laguerre series.
-- `lagline` -- Laguerre series of given straight line.
-- `lag2poly` -- convert a Laguerre series to a polynomial.
-- `poly2lag` -- convert a polynomial to a Laguerre series.
-
-Classes
--------
-- `Laguerre` -- A Laguerre series class.
+.. autosummary::
+ :toctree: generated/
+
+ lagfromroots
+ lagroots
+ lagvander
+ lagvander2d
+ lagvander3d
+ laggauss
+ lagweight
+ lagcompanion
+ lagfit
+ lagtrim
+ lagline
+ lag2poly
+ poly2lag
See also
--------
`numpy.polynomial`
"""
-from __future__ import division, absolute_import, print_function
-
-import warnings
import numpy as np
import numpy.linalg as la
from numpy.core.multiarray import normalize_axis_index
@@ -1193,7 +1207,7 @@ def lagvander2d(x, y, deg):
.. versionadded:: 1.7.0
"""
- return pu._vander2d(lagvander, x, y, deg)
+ return pu._vander_nd_flat((lagvander, lagvander), (x, y), deg)
def lagvander3d(x, y, z, deg):
@@ -1247,7 +1261,7 @@ def lagvander3d(x, y, z, deg):
.. versionadded:: 1.7.0
"""
- return pu._vander3d(lagvander, x, y, z, deg)
+ return pu._vander_nd_flat((lagvander, lagvander, lagvander), (x, y, z), deg)
def lagfit(x, y, deg, rcond=None, full=False, w=None):
diff --git a/numpy/polynomial/legendre.py b/numpy/polynomial/legendre.py
index 281982d0b..85fd5b18b 100644
--- a/numpy/polynomial/legendre.py
+++ b/numpy/polynomial/legendre.py
@@ -1,8 +1,7 @@
"""
-Legendre Series (:mod: `numpy.polynomial.legendre`)
-===================================================
-
-.. currentmodule:: numpy.polynomial.polynomial
+==================================================
+Legendre Series (:mod:`numpy.polynomial.legendre`)
+==================================================
This module provides a number of objects (mostly functions) useful for
dealing with Legendre series, including a `Legendre` class that
@@ -10,16 +9,23 @@ encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
+Classes
+-------
+.. autosummary::
+ :toctree: generated/
+
+ Legendre
+
Constants
---------
.. autosummary::
:toctree: generated/
- legdomain Legendre series default domain, [-1,1].
- legzero Legendre series that evaluates identically to 0.
- legone Legendre series that evaluates identically to 1.
- legx Legendre series for the identity map, ``f(x) = x``.
+ legdomain
+ legzero
+ legone
+ legx
Arithmetic
----------
@@ -27,17 +33,17 @@ Arithmetic
.. autosummary::
:toctree: generated/
- legadd add two Legendre series.
- legsub subtract one Legendre series from another.
- legmulx multiply a Legendre series in ``P_i(x)`` by ``x``.
- legmul multiply two Legendre series.
- legdiv divide one Legendre series by another.
- legpow raise a Legendre series to a positive integer power.
- legval evaluate a Legendre series at given points.
- legval2d evaluate a 2D Legendre series at given points.
- legval3d evaluate a 3D Legendre series at given points.
- leggrid2d evaluate a 2D Legendre series on a Cartesian product.
- leggrid3d evaluate a 3D Legendre series on a Cartesian product.
+ legadd
+ legsub
+ legmulx
+ legmul
+ legdiv
+ legpow
+ legval
+ legval2d
+ legval3d
+ leggrid2d
+ leggrid3d
Calculus
--------
@@ -45,8 +51,8 @@ Calculus
.. autosummary::
:toctree: generated/
- legder differentiate a Legendre series.
- legint integrate a Legendre series.
+ legder
+ legint
Misc Functions
--------------
@@ -54,36 +60,25 @@ Misc Functions
.. autosummary::
:toctree: generated/
- legfromroots create a Legendre series with specified roots.
- legroots find the roots of a Legendre series.
- legvander Vandermonde-like matrix for Legendre polynomials.
- legvander2d Vandermonde-like matrix for 2D power series.
- legvander3d Vandermonde-like matrix for 3D power series.
- leggauss Gauss-Legendre quadrature, points and weights.
- legweight Legendre weight function.
- legcompanion symmetrized companion matrix in Legendre form.
- legfit least-squares fit returning a Legendre series.
- legtrim trim leading coefficients from a Legendre series.
- legline Legendre series representing given straight line.
- leg2poly convert a Legendre series to a polynomial.
- poly2leg convert a polynomial to a Legendre series.
-
-Classes
--------
- Legendre A Legendre series class.
+ legfromroots
+ legroots
+ legvander
+ legvander2d
+ legvander3d
+ leggauss
+ legweight
+ legcompanion
+ legfit
+ legtrim
+ legline
+ leg2poly
+ poly2leg
See also
--------
-numpy.polynomial.polynomial
-numpy.polynomial.chebyshev
-numpy.polynomial.laguerre
-numpy.polynomial.hermite
-numpy.polynomial.hermite_e
+numpy.polynomial
"""
-from __future__ import division, absolute_import, print_function
-
-import warnings
import numpy as np
import numpy.linalg as la
from numpy.core.multiarray import normalize_axis_index
@@ -1229,7 +1224,7 @@ def legvander2d(x, y, deg):
.. versionadded:: 1.7.0
"""
- return pu._vander2d(legvander, x, y, deg)
+ return pu._vander_nd_flat((legvander, legvander), (x, y), deg)
def legvander3d(x, y, z, deg):
@@ -1283,7 +1278,7 @@ def legvander3d(x, y, z, deg):
.. versionadded:: 1.7.0
"""
- return pu._vander3d(legvander, x, y, z, deg)
+ return pu._vander_nd_flat((legvander, legvander, legvander), (x, y, z), deg)
def legfit(x, y, deg, rcond=None, full=False, w=None):
diff --git a/numpy/polynomial/polynomial.py b/numpy/polynomial/polynomial.py
index 3f0a902cf..2fb032db3 100644
--- a/numpy/polynomial/polynomial.py
+++ b/numpy/polynomial/polynomial.py
@@ -1,5 +1,7 @@
"""
-Objects for dealing with polynomials.
+=================================================
+Power Series (:mod:`numpy.polynomial.polynomial`)
+=================================================
This module provides a number of objects (mostly functions) useful for
dealing with polynomials, including a `Polynomial` class that
@@ -7,56 +9,69 @@ encapsulates the usual arithmetic operations. (General information
on how this module represents and works with polynomial objects is in
the docstring for its "parent" sub-package, `numpy.polynomial`).
+Classes
+-------
+.. autosummary::
+ :toctree: generated/
+
+ Polynomial
+
Constants
---------
-- `polydomain` -- Polynomial default domain, [-1,1].
-- `polyzero` -- (Coefficients of the) "zero polynomial."
-- `polyone` -- (Coefficients of the) constant polynomial 1.
-- `polyx` -- (Coefficients of the) identity map polynomial, ``f(x) = x``.
+.. autosummary::
+ :toctree: generated/
+
+ polydomain
+ polyzero
+ polyone
+ polyx
Arithmetic
----------
-- `polyadd` -- add two polynomials.
-- `polysub` -- subtract one polynomial from another.
-- `polymulx` -- multiply a polynomial in ``P_i(x)`` by ``x``.
-- `polymul` -- multiply two polynomials.
-- `polydiv` -- divide one polynomial by another.
-- `polypow` -- raise a polynomial to a positive integer power.
-- `polyval` -- evaluate a polynomial at given points.
-- `polyval2d` -- evaluate a 2D polynomial at given points.
-- `polyval3d` -- evaluate a 3D polynomial at given points.
-- `polygrid2d` -- evaluate a 2D polynomial on a Cartesian product.
-- `polygrid3d` -- evaluate a 3D polynomial on a Cartesian product.
+.. autosummary::
+ :toctree: generated/
+
+ polyadd
+ polysub
+ polymulx
+ polymul
+ polydiv
+ polypow
+ polyval
+ polyval2d
+ polyval3d
+ polygrid2d
+ polygrid3d
Calculus
--------
-- `polyder` -- differentiate a polynomial.
-- `polyint` -- integrate a polynomial.
+.. autosummary::
+ :toctree: generated/
+
+ polyder
+ polyint
Misc Functions
--------------
-- `polyfromroots` -- create a polynomial with specified roots.
-- `polyroots` -- find the roots of a polynomial.
-- `polyvalfromroots` -- evaluate a polynomial at given points from roots.
-- `polyvander` -- Vandermonde-like matrix for powers.
-- `polyvander2d` -- Vandermonde-like matrix for 2D power series.
-- `polyvander3d` -- Vandermonde-like matrix for 3D power series.
-- `polycompanion` -- companion matrix in power series form.
-- `polyfit` -- least-squares fit returning a polynomial.
-- `polytrim` -- trim leading coefficients from a polynomial.
-- `polyline` -- polynomial representing given straight line.
-
-Classes
--------
-- `Polynomial` -- polynomial class.
+.. autosummary::
+ :toctree: generated/
+
+ polyfromroots
+ polyroots
+ polyvalfromroots
+ polyvander
+ polyvander2d
+ polyvander3d
+ polycompanion
+ polyfit
+ polytrim
+ polyline
See Also
--------
`numpy.polynomial`
"""
-from __future__ import division, absolute_import, print_function
-
__all__ = [
'polyzero', 'polyone', 'polyx', 'polydomain', 'polyline', 'polyadd',
'polysub', 'polymulx', 'polymul', 'polydiv', 'polypow', 'polyval',
@@ -64,7 +79,6 @@ __all__ = [
'polyfit', 'polytrim', 'polyroots', 'Polynomial', 'polyval2d', 'polyval3d',
'polygrid2d', 'polygrid3d', 'polyvander2d', 'polyvander3d']
-import warnings
import numpy as np
import numpy.linalg as la
from numpy.core.multiarray import normalize_axis_index
@@ -1133,7 +1147,7 @@ def polyvander2d(x, y, deg):
polyvander, polyvander3d, polyval2d, polyval3d
"""
- return pu._vander2d(polyvander, x, y, deg)
+ return pu._vander_nd_flat((polyvander, polyvander), (x, y), deg)
def polyvander3d(x, y, z, deg):
@@ -1187,7 +1201,7 @@ def polyvander3d(x, y, z, deg):
.. versionadded:: 1.7.0
"""
- return pu._vander3d(polyvander, x, y, z, deg)
+ return pu._vander_nd_flat((polyvander, polyvander, polyvander), (x, y, z), deg)
def polyfit(x, y, deg, rcond=None, full=False, w=None):
@@ -1484,10 +1498,10 @@ class Polynomial(ABCPolyBase):
@staticmethod
def _repr_latex_term(i, arg_str, needs_parens):
if needs_parens:
- arg_str = r'\left({}\right)'.format(arg_str)
+ arg_str = rf"\left({arg_str}\right)"
if i == 0:
return '1'
elif i == 1:
return arg_str
else:
- return '{}^{{{}}}'.format(arg_str, i)
+ return f"{arg_str}^{{{i}}}"
diff --git a/numpy/polynomial/polyutils.py b/numpy/polynomial/polyutils.py
index 35b24d1ab..ec7ba6f1d 100644
--- a/numpy/polynomial/polyutils.py
+++ b/numpy/polynomial/polyutils.py
@@ -43,9 +43,8 @@ Functions
mapparms parameters of the linear map between domains.
"""
-from __future__ import division, absolute_import, print_function
-
import operator
+import functools
import warnings
import numpy as np
@@ -79,7 +78,7 @@ class PolyDomainError(PolyError):
# Base class for all polynomial types
#
-class PolyBase(object):
+class PolyBase:
"""
Base class for all polynomial types.
@@ -415,45 +414,89 @@ def mapdomain(x, old, new):
return off + scl*x
-def _vander2d(vander_f, x, y, deg):
- """
- Helper function used to implement the ``<type>vander2d`` functions.
+def _nth_slice(i, ndim):
+ sl = [np.newaxis] * ndim
+ sl[i] = slice(None)
+ return tuple(sl)
+
+
+def _vander_nd(vander_fs, points, degrees):
+ r"""
+ A generalization of the Vandermonde matrix for N dimensions
+
+ The result is built by combining the results of 1d Vandermonde matrices,
+
+ .. math::
+ W[i_0, \ldots, i_{M-1}, j_0, \ldots, j_{N-1}] = \prod_{k=0}^{N-1}{V_k(x_k)[i_0, \ldots, i_{M-1}, j_k]}
+
+ where
+
+ .. math::
+ N &= \texttt{len(points)} = \texttt{len(degrees)} = \texttt{len(vander\_fs)} \\
+ M &= \texttt{points[k].ndim} \\
+ V_k &= \texttt{vander\_fs[k]} \\
+ x_k &= \texttt{points[k]} \\
+ 0 \le j_k &\le \texttt{degrees[k]}
+
+ Expanding the one-dimensional :math:`V_k` functions gives:
+
+ .. math::
+ W[i_0, \ldots, i_{M-1}, j_0, \ldots, j_{N-1}] = \prod_{k=0}^{N-1}{B_{k, j_k}(x_k[i_0, \ldots, i_{M-1}])}
+
+ where :math:`B_{k,m}` is the m'th basis of the polynomial construction used along
+ dimension :math:`k`. For a regular polynomial, :math:`B_{k, m}(x) = P_m(x) = x^m`.
Parameters
----------
- vander_f : function(array_like, int) -> ndarray
- The 1d vander function, such as ``polyvander``
- x, y, deg :
- See the ``<type>vander2d`` functions for more detail
+ vander_fs : Sequence[function(array_like, int) -> ndarray]
+ The 1d vander function to use for each axis, such as ``polyvander``
+ points : Sequence[array_like]
+ Arrays of point coordinates, all of the same shape. The dtypes
+ will be converted to either float64 or complex128 depending on
+ whether any of the elements are complex. Scalars are converted to
+ 1-D arrays.
+ This must be the same length as `vander_fs`.
+ degrees : Sequence[int]
+ The maximum degree (inclusive) to use for each axis.
+ This must be the same length as `vander_fs`.
+
+ Returns
+ -------
+ vander_nd : ndarray
+ An array of shape ``points[0].shape + tuple(d + 1 for d in degrees)``.
"""
- degx, degy = deg
- x, y = np.array((x, y), copy=False) + 0.0
+ n_dims = len(vander_fs)
+ if n_dims != len(points):
+ raise ValueError(
+ f"Expected {n_dims} dimensions of sample points, got {len(points)}")
+ if n_dims != len(degrees):
+ raise ValueError(
+ f"Expected {n_dims} dimensions of degrees, got {len(degrees)}")
+ if n_dims == 0:
+ raise ValueError("Unable to guess a dtype or shape when no points are given")
- vx = vander_f(x, degx)
- vy = vander_f(y, degy)
- v = vx[..., None]*vy[..., None,:]
- return v.reshape(v.shape[:-2] + (-1,))
+ # convert to the same shape and type
+ points = tuple(np.array(tuple(points), copy=False) + 0.0)
+ # produce the vandermonde matrix for each dimension, placing the last
+ # axis of each in an independent trailing axis of the output
+ vander_arrays = (
+ vander_fs[i](points[i], degrees[i])[(...,) + _nth_slice(i, n_dims)]
+ for i in range(n_dims)
+ )
-def _vander3d(vander_f, x, y, z, deg):
- """
- Helper function used to implement the ``<type>vander3d`` functions.
+ # we checked this wasn't empty already, so no `initial` needed
+ return functools.reduce(operator.mul, vander_arrays)
- Parameters
- ----------
- vander_f : function(array_like, int) -> ndarray
- The 1d vander function, such as ``polyvander``
- x, y, z, deg :
- See the ``<type>vander3d`` functions for more detail
+
+def _vander_nd_flat(vander_fs, points, degrees):
"""
- degx, degy, degz = deg
- x, y, z = np.array((x, y, z), copy=False) + 0.0
+ Like `_vander_nd`, but flattens the last ``len(degrees)`` axes into a single axis
- vx = vander_f(x, degx)
- vy = vander_f(y, degy)
- vz = vander_f(z, degz)
- v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:]
- return v.reshape(v.shape[:-3] + (-1,))
+ Used to implement the public ``<type>vander<n>d`` functions.
+ """
+ v = _vander_nd(vander_fs, points, degrees)
+ return v.reshape(v.shape[:-len(degrees)] + (-1,))
def _fromroots(line_f, mul_f, roots):
@@ -497,17 +540,15 @@ def _valnd(val_f, c, *args):
c, args :
See the ``<type>val<n>d`` functions for more detail
"""
- try:
- args = tuple(np.array(args, copy=False))
- except Exception:
- # preserve the old error message
- if len(args) == 2:
+ args = [np.asanyarray(a) for a in args]
+ shape0 = args[0].shape
+ if not all((a.shape == shape0 for a in args[1:])):
+ if len(args) == 3:
raise ValueError('x, y, z are incompatible')
- elif len(args) == 3:
+ elif len(args) == 2:
raise ValueError('x, y are incompatible')
else:
raise ValueError('ordinates are incompatible')
-
it = iter(args)
x0 = next(it)
@@ -745,12 +786,11 @@ def _deprecate_as_int(x, desc):
else:
if ix == x:
warnings.warn(
- "In future, this will raise TypeError, as {} will need to "
- "be an integer not just an integral float."
- .format(desc),
+ f"In future, this will raise TypeError, as {desc} will "
+ "need to be an integer not just an integral float.",
DeprecationWarning,
stacklevel=3
)
return ix
- raise TypeError("{} must be an integer".format(desc))
+ raise TypeError(f"{desc} must be an integer")
diff --git a/numpy/polynomial/setup.py b/numpy/polynomial/setup.py
index cb59ee1e5..8fc82cba1 100644
--- a/numpy/polynomial/setup.py
+++ b/numpy/polynomial/setup.py
@@ -1,5 +1,3 @@
-from __future__ import division, print_function
-
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('polynomial', parent_package, top_path)
diff --git a/numpy/polynomial/tests/test_chebyshev.py b/numpy/polynomial/tests/test_chebyshev.py
index c8d2d6dba..2f54bebfd 100644
--- a/numpy/polynomial/tests/test_chebyshev.py
+++ b/numpy/polynomial/tests/test_chebyshev.py
@@ -1,8 +1,6 @@
"""Tests for chebyshev module.
"""
-from __future__ import division, absolute_import, print_function
-
from functools import reduce
import numpy as np
@@ -30,7 +28,7 @@ T9 = [0, 9, 0, -120, 0, 432, 0, -576, 0, 256]
Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9]
-class TestPrivate(object):
+class TestPrivate:
def test__cseries_to_zseries(self):
for i in range(5):
@@ -47,7 +45,7 @@ class TestPrivate(object):
assert_equal(res, tgt)
-class TestConstants(object):
+class TestConstants:
def test_chebdomain(self):
assert_equal(cheb.chebdomain, [-1, 1])
@@ -62,12 +60,12 @@ class TestConstants(object):
assert_equal(cheb.chebx, [0, 1])
-class TestArithmetic(object):
+class TestArithmetic:
def test_chebadd(self):
for i in range(5):
for j in range(5):
- msg = "At i=%d, j=%d" % (i, j)
+ msg = f"At i={i}, j={j}"
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] += 1
@@ -77,7 +75,7 @@ class TestArithmetic(object):
def test_chebsub(self):
for i in range(5):
for j in range(5):
- msg = "At i=%d, j=%d" % (i, j)
+ msg = f"At i={i}, j={j}"
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] -= 1
@@ -95,7 +93,7 @@ class TestArithmetic(object):
def test_chebmul(self):
for i in range(5):
for j in range(5):
- msg = "At i=%d, j=%d" % (i, j)
+ msg = f"At i={i}, j={j}"
tgt = np.zeros(i + j + 1)
tgt[i + j] += .5
tgt[abs(i - j)] += .5
@@ -105,7 +103,7 @@ class TestArithmetic(object):
def test_chebdiv(self):
for i in range(5):
for j in range(5):
- msg = "At i=%d, j=%d" % (i, j)
+ msg = f"At i={i}, j={j}"
ci = [0]*i + [1]
cj = [0]*j + [1]
tgt = cheb.chebadd(ci, cj)
@@ -116,14 +114,14 @@ class TestArithmetic(object):
def test_chebpow(self):
for i in range(5):
for j in range(5):
- msg = "At i=%d, j=%d" % (i, j)
+ msg = f"At i={i}, j={j}"
c = np.arange(i + 1)
tgt = reduce(cheb.chebmul, [c]*j, np.array([1]))
res = cheb.chebpow(c, j)
assert_equal(trim(res), trim(tgt), err_msg=msg)
-class TestEvaluation(object):
+class TestEvaluation:
# coefficients of 1 + 2*x + 3*x**2
c1d = np.array([2.5, 2., 1.5])
c2d = np.einsum('i,j->ij', c1d, c1d)
@@ -141,7 +139,7 @@ class TestEvaluation(object):
x = np.linspace(-1, 1)
y = [polyval(x, c) for c in Tlist]
for i in range(10):
- msg = "At i=%d" % i
+ msg = f"At i={i}"
tgt = y[i]
res = cheb.chebval(x, [0]*i + [1])
assert_almost_equal(res, tgt, err_msg=msg)
@@ -217,7 +215,7 @@ class TestEvaluation(object):
assert_(res.shape == (2, 3)*3)
-class TestIntegral(object):
+class TestIntegral:
def test_chebint(self):
# check exceptions
@@ -319,7 +317,7 @@ class TestIntegral(object):
assert_almost_equal(res, tgt)
-class TestDerivative(object):
+class TestDerivative:
def test_chebder(self):
# check exceptions
@@ -359,7 +357,7 @@ class TestDerivative(object):
assert_almost_equal(res, tgt)
-class TestVander(object):
+class TestVander:
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
@@ -407,7 +405,7 @@ class TestVander(object):
assert_(van.shape == (1, 5, 24))
-class TestFitting(object):
+class TestFitting:
def test_chebfit(self):
def f(x):
@@ -484,7 +482,7 @@ class TestFitting(object):
assert_almost_equal(coef1, coef2)
-class TestInterpolate(object):
+class TestInterpolate:
def f(self, x):
return x * (x - 1) * (x - 2)
@@ -509,7 +507,7 @@ class TestInterpolate(object):
assert_almost_equal(cheb.chebval(x, c), powx(x, p), decimal=12)
-class TestCompanion(object):
+class TestCompanion:
def test_raises(self):
assert_raises(ValueError, cheb.chebcompanion, [])
@@ -524,7 +522,7 @@ class TestCompanion(object):
assert_(cheb.chebcompanion([1, 2])[0, 0] == -.5)
-class TestGauss(object):
+class TestGauss:
def test_100(self):
x, w = cheb.chebgauss(100)
@@ -543,7 +541,7 @@ class TestGauss(object):
assert_almost_equal(w.sum(), tgt)
-class TestMisc(object):
+class TestMisc:
def test_chebfromroots(self):
res = cheb.chebfromroots([])
diff --git a/numpy/polynomial/tests/test_classes.py b/numpy/polynomial/tests/test_classes.py
index 2261f960b..e9f256cf8 100644
--- a/numpy/polynomial/tests/test_classes.py
+++ b/numpy/polynomial/tests/test_classes.py
@@ -3,8 +3,6 @@
This tests the convert and cast methods of all the polynomial classes.
"""
-from __future__ import division, absolute_import, print_function
-
import operator as op
from numbers import Number
@@ -15,7 +13,6 @@ from numpy.polynomial import (
from numpy.testing import (
assert_almost_equal, assert_raises, assert_equal, assert_,
)
-from numpy.compat import long
from numpy.polynomial.polyutils import RankWarning
#
@@ -44,7 +41,7 @@ def assert_poly_almost_equal(p1, p2, msg=""):
assert_(np.all(p1.window == p2.window))
assert_almost_equal(p1.coef, p2.coef)
except AssertionError:
- msg = "Result: %s\nTarget: %s", (p1, p2)
+ msg = f"Result: {p1}\nTarget: {p2}"
raise AssertionError(msg)
@@ -317,7 +314,7 @@ def test_truediv(Poly):
s = stype(5)
assert_poly_almost_equal(op.truediv(p2, s), p1)
assert_raises(TypeError, op.truediv, s, p2)
- for stype in (int, long, float):
+ for stype in (int, float):
s = stype(5)
assert_poly_almost_equal(op.truediv(p2, s), p1)
assert_raises(TypeError, op.truediv, s, p2)
@@ -574,7 +571,7 @@ def test_ufunc_override(Poly):
-class TestLatexRepr(object):
+class TestLatexRepr:
"""Test the latex repr used by ipython """
def as_latex(self, obj):
@@ -628,7 +625,7 @@ class TestLatexRepr(object):
#
-class TestInterpolate(object):
+class TestInterpolate:
def f(self, x):
return x * (x - 1) * (x - 2)
diff --git a/numpy/polynomial/tests/test_hermite.py b/numpy/polynomial/tests/test_hermite.py
index 271c1964b..53ee0844e 100644
--- a/numpy/polynomial/tests/test_hermite.py
+++ b/numpy/polynomial/tests/test_hermite.py
@@ -1,8 +1,6 @@
"""Tests for hermite module.
"""
-from __future__ import division, absolute_import, print_function
-
from functools import reduce
import numpy as np
@@ -30,7 +28,7 @@ def trim(x):
return herm.hermtrim(x, tol=1e-6)
-class TestConstants(object):
+class TestConstants:
def test_hermdomain(self):
assert_equal(herm.hermdomain, [-1, 1])
@@ -45,13 +43,13 @@ class TestConstants(object):
assert_equal(herm.hermx, [0, .5])
-class TestArithmetic(object):
+class TestArithmetic:
x = np.linspace(-3, 3, 100)
def test_hermadd(self):
for i in range(5):
for j in range(5):
- msg = "At i=%d, j=%d" % (i, j)
+ msg = f"At i={i}, j={j}"
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] += 1
@@ -61,7 +59,7 @@ class TestArithmetic(object):
def test_hermsub(self):
for i in range(5):
for j in range(5):
- msg = "At i=%d, j=%d" % (i, j)
+ msg = f"At i={i}, j={j}"
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] -= 1
@@ -82,7 +80,7 @@ class TestArithmetic(object):
pol1 = [0]*i + [1]
val1 = herm.hermval(self.x, pol1)
for j in range(5):
- msg = "At i=%d, j=%d" % (i, j)
+ msg = f"At i={i}, j={j}"
pol2 = [0]*j + [1]
val2 = herm.hermval(self.x, pol2)
pol3 = herm.hermmul(pol1, pol2)
@@ -93,7 +91,7 @@ class TestArithmetic(object):
def test_hermdiv(self):
for i in range(5):
for j in range(5):
- msg = "At i=%d, j=%d" % (i, j)
+ msg = f"At i={i}, j={j}"
ci = [0]*i + [1]
cj = [0]*j + [1]
tgt = herm.hermadd(ci, cj)
@@ -104,14 +102,14 @@ class TestArithmetic(object):
def test_hermpow(self):
for i in range(5):
for j in range(5):
- msg = "At i=%d, j=%d" % (i, j)
+ msg = f"At i={i}, j={j}"
c = np.arange(i + 1)
tgt = reduce(herm.hermmul, [c]*j, np.array([1]))
res = herm.hermpow(c, j)
assert_equal(trim(res), trim(tgt), err_msg=msg)
-class TestEvaluation(object):
+class TestEvaluation:
# coefficients of 1 + 2*x + 3*x**2
c1d = np.array([2.5, 1., .75])
c2d = np.einsum('i,j->ij', c1d, c1d)
@@ -129,7 +127,7 @@ class TestEvaluation(object):
x = np.linspace(-1, 1)
y = [polyval(x, c) for c in Hlist]
for i in range(10):
- msg = "At i=%d" % i
+ msg = f"At i={i}"
tgt = y[i]
res = herm.hermval(x, [0]*i + [1])
assert_almost_equal(res, tgt, err_msg=msg)
@@ -205,7 +203,7 @@ class TestEvaluation(object):
assert_(res.shape == (2, 3)*3)
-class TestIntegral(object):
+class TestIntegral:
def test_hermint(self):
# check exceptions
@@ -307,7 +305,7 @@ class TestIntegral(object):
assert_almost_equal(res, tgt)
-class TestDerivative(object):
+class TestDerivative:
def test_hermder(self):
# check exceptions
@@ -347,7 +345,7 @@ class TestDerivative(object):
assert_almost_equal(res, tgt)
-class TestVander(object):
+class TestVander:
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
@@ -395,7 +393,7 @@ class TestVander(object):
assert_(van.shape == (1, 5, 24))
-class TestFitting(object):
+class TestFitting:
def test_hermfit(self):
def f(x):
@@ -472,7 +470,7 @@ class TestFitting(object):
assert_almost_equal(coef1, coef2)
-class TestCompanion(object):
+class TestCompanion:
def test_raises(self):
assert_raises(ValueError, herm.hermcompanion, [])
@@ -487,7 +485,7 @@ class TestCompanion(object):
assert_(herm.hermcompanion([1, 2])[0, 0] == -.25)
-class TestGauss(object):
+class TestGauss:
def test_100(self):
x, w = herm.hermgauss(100)
@@ -506,7 +504,7 @@ class TestGauss(object):
assert_almost_equal(w.sum(), tgt)
-class TestMisc(object):
+class TestMisc:
def test_hermfromroots(self):
res = herm.hermfromroots([])
diff --git a/numpy/polynomial/tests/test_hermite_e.py b/numpy/polynomial/tests/test_hermite_e.py
index 434b30e7b..2d262a330 100644
--- a/numpy/polynomial/tests/test_hermite_e.py
+++ b/numpy/polynomial/tests/test_hermite_e.py
@@ -1,8 +1,6 @@
"""Tests for hermite_e module.
"""
-from __future__ import division, absolute_import, print_function
-
from functools import reduce
import numpy as np
@@ -30,7 +28,7 @@ def trim(x):
return herme.hermetrim(x, tol=1e-6)
-class TestConstants(object):
+class TestConstants:
def test_hermedomain(self):
assert_equal(herme.hermedomain, [-1, 1])
@@ -45,13 +43,13 @@ class TestConstants(object):
assert_equal(herme.hermex, [0, 1])
-class TestArithmetic(object):
+class TestArithmetic:
x = np.linspace(-3, 3, 100)
def test_hermeadd(self):
for i in range(5):
for j in range(5):
- msg = "At i=%d, j=%d" % (i, j)
+ msg = f"At i={i}, j={j}"
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] += 1
@@ -61,7 +59,7 @@ class TestArithmetic(object):
def test_hermesub(self):
for i in range(5):
for j in range(5):
- msg = "At i=%d, j=%d" % (i, j)
+ msg = f"At i={i}, j={j}"
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] -= 1
@@ -82,7 +80,7 @@ class TestArithmetic(object):
pol1 = [0]*i + [1]
val1 = herme.hermeval(self.x, pol1)
for j in range(5):
- msg = "At i=%d, j=%d" % (i, j)
+ msg = f"At i={i}, j={j}"
pol2 = [0]*j + [1]
val2 = herme.hermeval(self.x, pol2)
pol3 = herme.hermemul(pol1, pol2)
@@ -93,7 +91,7 @@ class TestArithmetic(object):
def test_hermediv(self):
for i in range(5):
for j in range(5):
- msg = "At i=%d, j=%d" % (i, j)
+ msg = f"At i={i}, j={j}"
ci = [0]*i + [1]
cj = [0]*j + [1]
tgt = herme.hermeadd(ci, cj)
@@ -104,14 +102,14 @@ class TestArithmetic(object):
def test_hermepow(self):
for i in range(5):
for j in range(5):
- msg = "At i=%d, j=%d" % (i, j)
+ msg = f"At i={i}, j={j}"
c = np.arange(i + 1)
tgt = reduce(herme.hermemul, [c]*j, np.array([1]))
res = herme.hermepow(c, j)
assert_equal(trim(res), trim(tgt), err_msg=msg)
-class TestEvaluation(object):
+class TestEvaluation:
# coefficients of 1 + 2*x + 3*x**2
c1d = np.array([4., 2., 3.])
c2d = np.einsum('i,j->ij', c1d, c1d)
@@ -129,7 +127,7 @@ class TestEvaluation(object):
x = np.linspace(-1, 1)
y = [polyval(x, c) for c in Helist]
for i in range(10):
- msg = "At i=%d" % i
+ msg = f"At i={i}"
tgt = y[i]
res = herme.hermeval(x, [0]*i + [1])
assert_almost_equal(res, tgt, err_msg=msg)
@@ -205,7 +203,7 @@ class TestEvaluation(object):
assert_(res.shape == (2, 3)*3)
-class TestIntegral(object):
+class TestIntegral:
def test_hermeint(self):
# check exceptions
@@ -307,7 +305,7 @@ class TestIntegral(object):
assert_almost_equal(res, tgt)
-class TestDerivative(object):
+class TestDerivative:
def test_hermeder(self):
# check exceptions
@@ -348,7 +346,7 @@ class TestDerivative(object):
assert_almost_equal(res, tgt)
-class TestVander(object):
+class TestVander:
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
@@ -396,7 +394,7 @@ class TestVander(object):
assert_(van.shape == (1, 5, 24))
-class TestFitting(object):
+class TestFitting:
def test_hermefit(self):
def f(x):
@@ -473,7 +471,7 @@ class TestFitting(object):
assert_almost_equal(coef1, coef2)
-class TestCompanion(object):
+class TestCompanion:
def test_raises(self):
assert_raises(ValueError, herme.hermecompanion, [])
@@ -488,7 +486,7 @@ class TestCompanion(object):
assert_(herme.hermecompanion([1, 2])[0, 0] == -.5)
-class TestGauss(object):
+class TestGauss:
def test_100(self):
x, w = herme.hermegauss(100)
@@ -507,7 +505,7 @@ class TestGauss(object):
assert_almost_equal(w.sum(), tgt)
-class TestMisc(object):
+class TestMisc:
def test_hermefromroots(self):
res = herme.hermefromroots([])
diff --git a/numpy/polynomial/tests/test_laguerre.py b/numpy/polynomial/tests/test_laguerre.py
index 4b9b28637..227ef3c55 100644
--- a/numpy/polynomial/tests/test_laguerre.py
+++ b/numpy/polynomial/tests/test_laguerre.py
@@ -1,8 +1,6 @@
"""Tests for laguerre module.
"""
-from __future__ import division, absolute_import, print_function
-
from functools import reduce
import numpy as np
@@ -27,7 +25,7 @@ def trim(x):
return lag.lagtrim(x, tol=1e-6)
-class TestConstants(object):
+class TestConstants:
def test_lagdomain(self):
assert_equal(lag.lagdomain, [0, 1])
@@ -42,13 +40,13 @@ class TestConstants(object):
assert_equal(lag.lagx, [1, -1])
-class TestArithmetic(object):
+class TestArithmetic:
x = np.linspace(-3, 3, 100)
def test_lagadd(self):
for i in range(5):
for j in range(5):
- msg = "At i=%d, j=%d" % (i, j)
+ msg = f"At i={i}, j={j}"
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] += 1
@@ -58,7 +56,7 @@ class TestArithmetic(object):
def test_lagsub(self):
for i in range(5):
for j in range(5):
- msg = "At i=%d, j=%d" % (i, j)
+ msg = f"At i={i}, j={j}"
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] -= 1
@@ -79,7 +77,7 @@ class TestArithmetic(object):
pol1 = [0]*i + [1]
val1 = lag.lagval(self.x, pol1)
for j in range(5):
- msg = "At i=%d, j=%d" % (i, j)
+ msg = f"At i={i}, j={j}"
pol2 = [0]*j + [1]
val2 = lag.lagval(self.x, pol2)
pol3 = lag.lagmul(pol1, pol2)
@@ -90,7 +88,7 @@ class TestArithmetic(object):
def test_lagdiv(self):
for i in range(5):
for j in range(5):
- msg = "At i=%d, j=%d" % (i, j)
+ msg = f"At i={i}, j={j}"
ci = [0]*i + [1]
cj = [0]*j + [1]
tgt = lag.lagadd(ci, cj)
@@ -101,14 +99,14 @@ class TestArithmetic(object):
def test_lagpow(self):
for i in range(5):
for j in range(5):
- msg = "At i=%d, j=%d" % (i, j)
+ msg = f"At i={i}, j={j}"
c = np.arange(i + 1)
tgt = reduce(lag.lagmul, [c]*j, np.array([1]))
res = lag.lagpow(c, j)
assert_equal(trim(res), trim(tgt), err_msg=msg)
-class TestEvaluation(object):
+class TestEvaluation:
# coefficients of 1 + 2*x + 3*x**2
c1d = np.array([9., -14., 6.])
c2d = np.einsum('i,j->ij', c1d, c1d)
@@ -126,7 +124,7 @@ class TestEvaluation(object):
x = np.linspace(-1, 1)
y = [polyval(x, c) for c in Llist]
for i in range(7):
- msg = "At i=%d" % i
+ msg = f"At i={i}"
tgt = y[i]
res = lag.lagval(x, [0]*i + [1])
assert_almost_equal(res, tgt, err_msg=msg)
@@ -202,7 +200,7 @@ class TestEvaluation(object):
assert_(res.shape == (2, 3)*3)
-class TestIntegral(object):
+class TestIntegral:
def test_lagint(self):
# check exceptions
@@ -304,7 +302,7 @@ class TestIntegral(object):
assert_almost_equal(res, tgt)
-class TestDerivative(object):
+class TestDerivative:
def test_lagder(self):
# check exceptions
@@ -344,7 +342,7 @@ class TestDerivative(object):
assert_almost_equal(res, tgt)
-class TestVander(object):
+class TestVander:
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
@@ -392,7 +390,7 @@ class TestVander(object):
assert_(van.shape == (1, 5, 24))
-class TestFitting(object):
+class TestFitting:
def test_lagfit(self):
def f(x):
@@ -454,7 +452,7 @@ class TestFitting(object):
assert_almost_equal(lag.lagfit(x, x, [0, 1]), [1, -1])
-class TestCompanion(object):
+class TestCompanion:
def test_raises(self):
assert_raises(ValueError, lag.lagcompanion, [])
@@ -469,7 +467,7 @@ class TestCompanion(object):
assert_(lag.lagcompanion([1, 2])[0, 0] == 1.5)
-class TestGauss(object):
+class TestGauss:
def test_100(self):
x, w = lag.laggauss(100)
@@ -488,7 +486,7 @@ class TestGauss(object):
assert_almost_equal(w.sum(), tgt)
-class TestMisc(object):
+class TestMisc:
def test_lagfromroots(self):
res = lag.lagfromroots([])
diff --git a/numpy/polynomial/tests/test_legendre.py b/numpy/polynomial/tests/test_legendre.py
index 917a7e03a..a2a212c24 100644
--- a/numpy/polynomial/tests/test_legendre.py
+++ b/numpy/polynomial/tests/test_legendre.py
@@ -1,8 +1,6 @@
"""Tests for legendre module.
"""
-from __future__ import division, absolute_import, print_function
-
from functools import reduce
import numpy as np
@@ -30,7 +28,7 @@ def trim(x):
return leg.legtrim(x, tol=1e-6)
-class TestConstants(object):
+class TestConstants:
def test_legdomain(self):
assert_equal(leg.legdomain, [-1, 1])
@@ -45,13 +43,13 @@ class TestConstants(object):
assert_equal(leg.legx, [0, 1])
-class TestArithmetic(object):
+class TestArithmetic:
x = np.linspace(-1, 1, 100)
def test_legadd(self):
for i in range(5):
for j in range(5):
- msg = "At i=%d, j=%d" % (i, j)
+ msg = f"At i={i}, j={j}"
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] += 1
@@ -61,7 +59,7 @@ class TestArithmetic(object):
def test_legsub(self):
for i in range(5):
for j in range(5):
- msg = "At i=%d, j=%d" % (i, j)
+ msg = f"At i={i}, j={j}"
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] -= 1
@@ -83,7 +81,7 @@ class TestArithmetic(object):
pol1 = [0]*i + [1]
val1 = leg.legval(self.x, pol1)
for j in range(5):
- msg = "At i=%d, j=%d" % (i, j)
+ msg = f"At i={i}, j={j}"
pol2 = [0]*j + [1]
val2 = leg.legval(self.x, pol2)
pol3 = leg.legmul(pol1, pol2)
@@ -94,7 +92,7 @@ class TestArithmetic(object):
def test_legdiv(self):
for i in range(5):
for j in range(5):
- msg = "At i=%d, j=%d" % (i, j)
+ msg = f"At i={i}, j={j}"
ci = [0]*i + [1]
cj = [0]*j + [1]
tgt = leg.legadd(ci, cj)
@@ -105,14 +103,14 @@ class TestArithmetic(object):
def test_legpow(self):
for i in range(5):
for j in range(5):
- msg = "At i=%d, j=%d" % (i, j)
+ msg = f"At i={i}, j={j}"
c = np.arange(i + 1)
tgt = reduce(leg.legmul, [c]*j, np.array([1]))
res = leg.legpow(c, j)
assert_equal(trim(res), trim(tgt), err_msg=msg)
-class TestEvaluation(object):
+class TestEvaluation:
# coefficients of 1 + 2*x + 3*x**2
c1d = np.array([2., 2., 2.])
c2d = np.einsum('i,j->ij', c1d, c1d)
@@ -130,7 +128,7 @@ class TestEvaluation(object):
x = np.linspace(-1, 1)
y = [polyval(x, c) for c in Llist]
for i in range(10):
- msg = "At i=%d" % i
+ msg = f"At i={i}"
tgt = y[i]
res = leg.legval(x, [0]*i + [1])
assert_almost_equal(res, tgt, err_msg=msg)
@@ -206,7 +204,7 @@ class TestEvaluation(object):
assert_(res.shape == (2, 3)*3)
-class TestIntegral(object):
+class TestIntegral:
def test_legint(self):
# check exceptions
@@ -308,7 +306,7 @@ class TestIntegral(object):
assert_almost_equal(res, tgt)
-class TestDerivative(object):
+class TestDerivative:
def test_legder(self):
# check exceptions
@@ -348,7 +346,7 @@ class TestDerivative(object):
assert_almost_equal(res, tgt)
-class TestVander(object):
+class TestVander:
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
@@ -396,7 +394,7 @@ class TestVander(object):
assert_(van.shape == (1, 5, 24))
-class TestFitting(object):
+class TestFitting:
def test_legfit(self):
def f(x):
@@ -473,7 +471,7 @@ class TestFitting(object):
assert_almost_equal(coef1, coef2)
-class TestCompanion(object):
+class TestCompanion:
def test_raises(self):
assert_raises(ValueError, leg.legcompanion, [])
@@ -488,7 +486,7 @@ class TestCompanion(object):
assert_(leg.legcompanion([1, 2])[0, 0] == -.5)
-class TestGauss(object):
+class TestGauss:
def test_100(self):
x, w = leg.leggauss(100)
@@ -507,7 +505,7 @@ class TestGauss(object):
assert_almost_equal(w.sum(), tgt)
-class TestMisc(object):
+class TestMisc:
def test_legfromroots(self):
res = leg.legfromroots([])
diff --git a/numpy/polynomial/tests/test_polynomial.py b/numpy/polynomial/tests/test_polynomial.py
index 1436963c6..5fd1a82a2 100644
--- a/numpy/polynomial/tests/test_polynomial.py
+++ b/numpy/polynomial/tests/test_polynomial.py
@@ -1,15 +1,13 @@
"""Tests for polynomial module.
"""
-from __future__ import division, absolute_import, print_function
-
from functools import reduce
import numpy as np
import numpy.polynomial.polynomial as poly
from numpy.testing import (
assert_almost_equal, assert_raises, assert_equal, assert_,
- assert_warns, assert_array_equal)
+ assert_warns, assert_array_equal, assert_raises_regex)
def trim(x):
@@ -29,7 +27,7 @@ T9 = [0, 9, 0, -120, 0, 432, 0, -576, 0, 256]
Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9]
-class TestConstants(object):
+class TestConstants:
def test_polydomain(self):
assert_equal(poly.polydomain, [-1, 1])
@@ -44,12 +42,12 @@ class TestConstants(object):
assert_equal(poly.polyx, [0, 1])
-class TestArithmetic(object):
+class TestArithmetic:
def test_polyadd(self):
for i in range(5):
for j in range(5):
- msg = "At i=%d, j=%d" % (i, j)
+ msg = f"At i={i}, j={j}"
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] += 1
@@ -59,7 +57,7 @@ class TestArithmetic(object):
def test_polysub(self):
for i in range(5):
for j in range(5):
- msg = "At i=%d, j=%d" % (i, j)
+ msg = f"At i={i}, j={j}"
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] -= 1
@@ -77,7 +75,7 @@ class TestArithmetic(object):
def test_polymul(self):
for i in range(5):
for j in range(5):
- msg = "At i=%d, j=%d" % (i, j)
+ msg = f"At i={i}, j={j}"
tgt = np.zeros(i + j + 1)
tgt[i + j] += 1
res = poly.polymul([0]*i + [1], [0]*j + [1])
@@ -96,7 +94,7 @@ class TestArithmetic(object):
# check rest.
for i in range(5):
for j in range(5):
- msg = "At i=%d, j=%d" % (i, j)
+ msg = f"At i={i}, j={j}"
ci = [0]*i + [1, 2]
cj = [0]*j + [1, 2]
tgt = poly.polyadd(ci, cj)
@@ -107,14 +105,14 @@ class TestArithmetic(object):
def test_polypow(self):
for i in range(5):
for j in range(5):
- msg = "At i=%d, j=%d" % (i, j)
+ msg = f"At i={i}, j={j}"
c = np.arange(i + 1)
tgt = reduce(poly.polymul, [c]*j, np.array([1]))
res = poly.polypow(c, j)
assert_equal(trim(res), trim(tgt), err_msg=msg)
-class TestEvaluation(object):
+class TestEvaluation:
# coefficients of 1 + 2*x + 3*x**2
c1d = np.array([1., 2., 3.])
c2d = np.einsum('i,j->ij', c1d, c1d)
@@ -229,7 +227,8 @@ class TestEvaluation(object):
y1, y2, y3 = self.y
#test exceptions
- assert_raises(ValueError, poly.polyval2d, x1, x2[:2], self.c2d)
+ assert_raises_regex(ValueError, 'incompatible',
+ poly.polyval2d, x1, x2[:2], self.c2d)
#test values
tgt = y1*y2
@@ -246,7 +245,8 @@ class TestEvaluation(object):
y1, y2, y3 = self.y
#test exceptions
- assert_raises(ValueError, poly.polyval3d, x1, x2, x3[:2], self.c3d)
+ assert_raises_regex(ValueError, 'incompatible',
+ poly.polyval3d, x1, x2, x3[:2], self.c3d)
#test values
tgt = y1*y2*y3
@@ -287,7 +287,7 @@ class TestEvaluation(object):
assert_(res.shape == (2, 3)*3)
-class TestIntegral(object):
+class TestIntegral:
def test_polyint(self):
# check exceptions
@@ -386,7 +386,7 @@ class TestIntegral(object):
assert_almost_equal(res, tgt)
-class TestDerivative(object):
+class TestDerivative:
def test_polyder(self):
# check exceptions
@@ -426,7 +426,7 @@ class TestDerivative(object):
assert_almost_equal(res, tgt)
-class TestVander(object):
+class TestVander:
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
@@ -474,7 +474,7 @@ class TestVander(object):
assert_(van.shape == (1, 5, 24))
-class TestCompanion(object):
+class TestCompanion:
def test_raises(self):
assert_raises(ValueError, poly.polycompanion, [])
@@ -489,7 +489,7 @@ class TestCompanion(object):
assert_(poly.polycompanion([1, 2])[0, 0] == -.5)
-class TestMisc(object):
+class TestMisc:
def test_polyfromroots(self):
res = poly.polyfromroots([])
diff --git a/numpy/polynomial/tests/test_polyutils.py b/numpy/polynomial/tests/test_polyutils.py
index 801c558cc..1b27f53b5 100644
--- a/numpy/polynomial/tests/test_polyutils.py
+++ b/numpy/polynomial/tests/test_polyutils.py
@@ -1,8 +1,6 @@
"""Tests for polyutils module.
"""
-from __future__ import division, absolute_import, print_function
-
import numpy as np
import numpy.polynomial.polyutils as pu
from numpy.testing import (
@@ -10,7 +8,7 @@ from numpy.testing import (
)
-class TestMisc(object):
+class TestMisc:
def test_trimseq(self):
for i in range(5):
@@ -43,7 +41,7 @@ class TestMisc(object):
assert_equal(pu.trimcoef(coef, 2), [0])
-class TestDomain(object):
+class TestDomain:
def test_getdomain(self):
# test for real values
diff --git a/numpy/polynomial/tests/test_printing.py b/numpy/polynomial/tests/test_printing.py
index 3f1236402..049d3af2f 100644
--- a/numpy/polynomial/tests/test_printing.py
+++ b/numpy/polynomial/tests/test_printing.py
@@ -1,10 +1,8 @@
-from __future__ import division, absolute_import, print_function
-
import numpy.polynomial as poly
from numpy.testing import assert_equal
-class TestStr(object):
+class TestStr:
def test_polynomial_str(self):
res = str(poly.Polynomial([0, 1]))
tgt = 'poly([0. 1.])'
@@ -36,7 +34,7 @@ class TestStr(object):
assert_equal(res, tgt)
-class TestRepr(object):
+class TestRepr:
def test_polynomial_str(self):
res = repr(poly.Polynomial([0, 1]))
tgt = 'Polynomial([0., 1.], domain=[-1, 1], window=[-1, 1])'
diff --git a/numpy/random/.gitignore b/numpy/random/.gitignore
new file mode 100644
index 000000000..fea3f955a
--- /dev/null
+++ b/numpy/random/.gitignore
@@ -0,0 +1,3 @@
+# generated files
+_bounded_integers.pyx
+_bounded_integers.pxd
diff --git a/numpy/random/__init__.pxd b/numpy/random/__init__.pxd
new file mode 100644
index 000000000..1f9057296
--- /dev/null
+++ b/numpy/random/__init__.pxd
@@ -0,0 +1,14 @@
+cimport numpy as np
+from libc.stdint cimport uint32_t, uint64_t
+
+cdef extern from "numpy/random/bitgen.h":
+ struct bitgen:
+ void *state
+ uint64_t (*next_uint64)(void *st) nogil
+ uint32_t (*next_uint32)(void *st) nogil
+ double (*next_double)(void *st) nogil
+ uint64_t (*next_raw)(void *st) nogil
+
+ ctypedef bitgen bitgen_t
+
+from numpy.random.bit_generator cimport BitGenerator, SeedSequence
diff --git a/numpy/random/__init__.py b/numpy/random/__init__.py
index f7c248451..7efa5c07f 100644
--- a/numpy/random/__init__.py
+++ b/numpy/random/__init__.py
@@ -122,8 +122,6 @@ set_state Set state of generator.
"""
-from __future__ import division, absolute_import, print_function
-
__all__ = [
'beta',
'binomial',
@@ -179,20 +177,19 @@ __all__ = [
# add these for module-freeze analysis (like PyInstaller)
from . import _pickle
-from . import common
-from . import bounded_integers
-
+from . import _common
+from . import _bounded_integers
+
+from ._generator import Generator, default_rng
+from .bit_generator import SeedSequence, BitGenerator
+from ._mt19937 import MT19937
+from ._pcg64 import PCG64
+from ._philox import Philox
+from ._sfc64 import SFC64
from .mtrand import *
-from .generator import Generator, default_rng
-from .bit_generator import SeedSequence
-from .mt19937 import MT19937
-from .pcg64 import PCG64
-from .philox import Philox
-from .sfc64 import SFC64
-from .mtrand import RandomState
__all__ += ['Generator', 'RandomState', 'SeedSequence', 'MT19937',
- 'Philox', 'PCG64', 'SFC64', 'default_rng']
+ 'Philox', 'PCG64', 'SFC64', 'default_rng', 'BitGenerator']
def __RandomState_ctor():
diff --git a/numpy/random/bounded_integers.pxd.in b/numpy/random/_bounded_integers.pxd.in
index 7a3f224dc..5ae5a8067 100644
--- a/numpy/random/bounded_integers.pxd.in
+++ b/numpy/random/_bounded_integers.pxd.in
@@ -4,7 +4,7 @@ import numpy as np
cimport numpy as np
ctypedef np.npy_bool bool_t
-from .common cimport bitgen_t
+from numpy.random cimport bitgen_t
cdef inline uint64_t _gen_mask(uint64_t max_val) nogil:
"""Mask generator for use in bounded random numbers"""
diff --git a/numpy/random/bounded_integers.pyx.in b/numpy/random/_bounded_integers.pyx.in
index 411b65a37..9f46685d3 100644
--- a/numpy/random/bounded_integers.pyx.in
+++ b/numpy/random/_bounded_integers.pyx.in
@@ -4,21 +4,53 @@
import numpy as np
cimport numpy as np
-from .distributions cimport *
-
__all__ = []
np.import_array()
-_integers_types = {'bool': (0, 2),
- 'int8': (-2**7, 2**7),
- 'int16': (-2**15, 2**15),
- 'int32': (-2**31, 2**31),
- 'int64': (-2**63, 2**63),
- 'uint8': (0, 2**8),
- 'uint16': (0, 2**16),
- 'uint32': (0, 2**32),
- 'uint64': (0, 2**64)}
+cdef extern from "numpy/random/distributions.h":
+ # Generate random numbers in closed interval [off, off + rng].
+ uint64_t random_bounded_uint64(bitgen_t *bitgen_state,
+ uint64_t off, uint64_t rng,
+ uint64_t mask, bint use_masked) nogil
+ uint32_t random_buffered_bounded_uint32(bitgen_t *bitgen_state,
+ uint32_t off, uint32_t rng,
+ uint32_t mask, bint use_masked,
+ int *bcnt, uint32_t *buf) nogil
+ uint16_t random_buffered_bounded_uint16(bitgen_t *bitgen_state,
+ uint16_t off, uint16_t rng,
+ uint16_t mask, bint use_masked,
+ int *bcnt, uint32_t *buf) nogil
+ uint8_t random_buffered_bounded_uint8(bitgen_t *bitgen_state,
+ uint8_t off, uint8_t rng,
+ uint8_t mask, bint use_masked,
+ int *bcnt, uint32_t *buf) nogil
+ np.npy_bool random_buffered_bounded_bool(bitgen_t *bitgen_state,
+ np.npy_bool off, np.npy_bool rng,
+ np.npy_bool mask, bint use_masked,
+ int *bcnt, uint32_t *buf) nogil
+ void random_bounded_uint64_fill(bitgen_t *bitgen_state,
+ uint64_t off, uint64_t rng, np.npy_intp cnt,
+ bint use_masked,
+ uint64_t *out) nogil
+ void random_bounded_uint32_fill(bitgen_t *bitgen_state,
+ uint32_t off, uint32_t rng, np.npy_intp cnt,
+ bint use_masked,
+ uint32_t *out) nogil
+ void random_bounded_uint16_fill(bitgen_t *bitgen_state,
+ uint16_t off, uint16_t rng, np.npy_intp cnt,
+ bint use_masked,
+ uint16_t *out) nogil
+ void random_bounded_uint8_fill(bitgen_t *bitgen_state,
+ uint8_t off, uint8_t rng, np.npy_intp cnt,
+ bint use_masked,
+ uint8_t *out) nogil
+ void random_bounded_bool_fill(bitgen_t *bitgen_state,
+ np.npy_bool off, np.npy_bool rng, np.npy_intp cnt,
+ bint use_masked,
+ np.npy_bool *out) nogil
+
+
{{
py:
type_info = (('uint32', 'uint32', 'uint64', 'NPY_UINT64', 0, 0, 0, '0X100000000ULL'),
@@ -149,7 +181,7 @@ cdef object _rand_{{nptype}}_broadcast(object low, object high, object size,
highm1_arr = <np.ndarray>np.PyArray_FROM_OTF(high_m1, np.{{npctype}}, np.NPY_ALIGNED | np.NPY_FORCECAST)
else:
# If input is object or a floating type
- highm1_arr = <np.ndarray>np.empty_like(high_arr, dtype=np.{{nptype}})
+ highm1_arr = <np.ndarray>np.empty_like(high_arr, dtype=np.{{otype}})
highm1_data = <{{nptype}}_t *>np.PyArray_DATA(highm1_arr)
cnt = np.PyArray_SIZE(high_arr)
flat = high_arr.flat
@@ -171,10 +203,10 @@ cdef object _rand_{{nptype}}_broadcast(object low, object high, object size,
low_arr = <np.ndarray>np.PyArray_FROM_OTF(low, np.{{npctype}}, np.NPY_ALIGNED | np.NPY_FORCECAST)
if size is not None:
- out_arr = <np.ndarray>np.empty(size, np.{{nptype}})
+ out_arr = <np.ndarray>np.empty(size, np.{{otype}})
else:
it = np.PyArray_MultiIterNew2(low_arr, high_arr)
- out_arr = <np.ndarray>np.empty(it.shape, np.{{nptype}})
+ out_arr = <np.ndarray>np.empty(it.shape, np.{{otype}})
it = np.PyArray_MultiIterNew3(low_arr, high_arr, out_arr)
out_data = <uint64_t *>np.PyArray_DATA(out_arr)
@@ -216,12 +248,12 @@ cdef object _rand_{{nptype}}(object low, object high, object size,
"""
_rand_{{nptype}}(low, high, size, use_masked, *state, lock)
- Return random np.{{nptype}} integers from `low` (inclusive) to `high` (exclusive).
+ Return random `np.{{otype}}` integers from `low` (inclusive) to `high` (exclusive).
Return random integers from the "discrete uniform" distribution in the
interval [`low`, `high`). If `high` is None (the default),
then results are from [0, `low`). On entry the arguments are presumed
- to have been validated for size and order for the np.{{nptype}} type.
+ to have been validated for size and order for the `np.{{otype}}` type.
Parameters
----------
@@ -247,7 +279,7 @@ cdef object _rand_{{nptype}}(object low, object high, object size,
Returns
-------
- out : python scalar or ndarray of np.{{nptype}}
+ out : python scalar or ndarray of np.{{otype}}
`size`-shaped array of random integers from the appropriate
distribution, or a single such random int if `size` not provided.
@@ -266,14 +298,13 @@ cdef object _rand_{{nptype}}(object low, object high, object size,
if size is not None:
if (np.prod(size) == 0):
- return np.empty(size, dtype=np.{{nptype}})
+ return np.empty(size, dtype=np.{{otype}})
low_arr = <np.ndarray>np.array(low, copy=False)
high_arr = <np.ndarray>np.array(high, copy=False)
low_ndim = np.PyArray_NDIM(low_arr)
high_ndim = np.PyArray_NDIM(high_arr)
- if ((low_ndim == 0 or (low_ndim == 1 and low_arr.size == 1 and size is not None)) and
- (high_ndim == 0 or (high_ndim == 1 and high_arr.size == 1 and size is not None))):
+ if low_ndim == 0 and high_ndim == 0:
low = int(low_arr)
high = int(high_arr)
# Subtract 1 since internal generator produces on closed interval [low, high]
@@ -295,7 +326,7 @@ cdef object _rand_{{nptype}}(object low, object high, object size,
random_bounded_{{utype}}_fill(state, off, rng, 1, use_masked, &out_val)
return np.{{otype}}(<{{nptype}}_t>out_val)
else:
- out_arr = <np.ndarray>np.empty(size, np.{{nptype}})
+ out_arr = <np.ndarray>np.empty(size, np.{{otype}})
cnt = np.PyArray_SIZE(out_arr)
out_data = <{{utype}}_t *>np.PyArray_DATA(out_arr)
with lock, nogil:
diff --git a/numpy/random/common.pxd b/numpy/random/_common.pxd
index ac0a94bb0..588f613ae 100644
--- a/numpy/random/common.pxd
+++ b/numpy/random/_common.pxd
@@ -1,23 +1,12 @@
#cython: language_level=3
-from libc.stdint cimport (uint8_t, uint16_t, uint32_t, uint64_t,
- int8_t, int16_t, int32_t, int64_t, intptr_t,
- uintptr_t)
-from libc.math cimport sqrt
-
-cdef extern from "src/bitgen.h":
- struct bitgen:
- void *state
- uint64_t (*next_uint64)(void *st) nogil
- uint32_t (*next_uint32)(void *st) nogil
- double (*next_double)(void *st) nogil
- uint64_t (*next_raw)(void *st) nogil
-
- ctypedef bitgen bitgen_t
+from libc.stdint cimport uint32_t, uint64_t, int32_t, int64_t
import numpy as np
cimport numpy as np
+from numpy.random cimport bitgen_t
+
cdef double POISSON_LAM_MAX
cdef double LEGACY_POISSON_LAM_MAX
cdef uint64_t MAXSIZE
@@ -44,7 +33,7 @@ cdef object prepare_ctypes(bitgen_t *bitgen)
cdef int check_constraint(double val, object name, constraint_type cons) except -1
cdef int check_array_constraint(np.ndarray val, object name, constraint_type cons) except -1
-cdef extern from "src/aligned_malloc/aligned_malloc.h":
+cdef extern from "include/aligned_malloc.h":
cdef void *PyArray_realloc_aligned(void *p, size_t n)
cdef void *PyArray_malloc_aligned(size_t n)
cdef void *PyArray_calloc_aligned(size_t n, size_t s)
@@ -56,6 +45,7 @@ ctypedef double (*random_double_1)(void *state, double a) nogil
ctypedef double (*random_double_2)(void *state, double a, double b) nogil
ctypedef double (*random_double_3)(void *state, double a, double b, double c) nogil
+ctypedef double (*random_float_fill)(bitgen_t *state, np.npy_intp count, float* out) nogil
ctypedef float (*random_float_0)(bitgen_t *state) nogil
ctypedef float (*random_float_1)(bitgen_t *state, float a) nogil
diff --git a/numpy/random/common.pyx b/numpy/random/_common.pyx
index 74cd5f033..ef1afac7c 100644
--- a/numpy/random/common.pyx
+++ b/numpy/random/_common.pyx
@@ -6,7 +6,7 @@ import sys
import numpy as np
cimport numpy as np
-from .common cimport *
+from libc.stdint cimport uintptr_t
__all__ = ['interface']
@@ -262,14 +262,16 @@ cdef object double_fill(void *func, bitgen_t *state, object size, object lock, o
return out_array
cdef object float_fill(void *func, bitgen_t *state, object size, object lock, object out):
- cdef random_float_0 random_func = (<random_float_0>func)
+ cdef random_float_fill random_func = (<random_float_fill>func)
+ cdef float out_val
cdef float *out_array_data
cdef np.ndarray out_array
cdef np.npy_intp i, n
if size is None and out is None:
with lock:
- return random_func(state)
+ random_func(state, 1, &out_val)
+ return out_val
if out is not None:
check_output(out, np.float32, size)
@@ -280,8 +282,7 @@ cdef object float_fill(void *func, bitgen_t *state, object size, object lock, ob
n = np.PyArray_SIZE(out_array)
out_array_data = <float *>np.PyArray_DATA(out_array)
with lock, nogil:
- for i in range(n):
- out_array_data[i] = random_func(state)
+ random_func(state, n, out_array_data)
return out_array
cdef object float_fill_from_double(void *func, bitgen_t *state, object size, object lock, object out):
diff --git a/numpy/random/_examples/cffi/extending.py b/numpy/random/_examples/cffi/extending.py
new file mode 100644
index 000000000..8440d400e
--- /dev/null
+++ b/numpy/random/_examples/cffi/extending.py
@@ -0,0 +1,40 @@
+"""
+Use cffi to access any of the underlying C functions from distributions.h
+"""
+import os
+import numpy as np
+import cffi
+from .parse import parse_distributions_h
+ffi = cffi.FFI()
+
+inc_dir = os.path.join(np.get_include(), 'numpy')
+
+# Basic numpy types
+ffi.cdef('''
+ typedef intptr_t npy_intp;
+ typedef unsigned char npy_bool;
+
+''')
+
+parse_distributions_h(ffi, inc_dir)
+
+lib = ffi.dlopen(np.random._generator.__file__)
+
+# Compare the distributions.h random_standard_normal_fill to
+# Generator.standard_normal
+bit_gen = np.random.PCG64()
+rng = np.random.Generator(bit_gen)
+state = bit_gen.state
+
+interface = rng.bit_generator.cffi
+n = 100
+vals_cffi = ffi.new('double[%d]' % n)
+lib.random_standard_normal_fill(interface.bit_generator, n, vals_cffi)
+
+# reset the state
+bit_gen.state = state
+
+vals = rng.standard_normal(n)
+
+for i in range(n):
+ assert vals[i] == vals_cffi[i]
diff --git a/numpy/random/_examples/cffi/parse.py b/numpy/random/_examples/cffi/parse.py
new file mode 100644
index 000000000..73d8646c7
--- /dev/null
+++ b/numpy/random/_examples/cffi/parse.py
@@ -0,0 +1,46 @@
+import os
+
+
+def parse_distributions_h(ffi, inc_dir):
+ """
+ Parse distributions.h located in inc_dir for CFFI, filling in the ffi.cdef
+
+ Read the function declarations without the "#define ..." macros that will
+ be filled in when loading the library.
+ """
+
+ with open(os.path.join(inc_dir, 'random', 'bitgen.h')) as fid:
+ s = []
+ for line in fid:
+ # massage the include file
+ if line.strip().startswith('#'):
+ continue
+ s.append(line)
+ ffi.cdef('\n'.join(s))
+
+ with open(os.path.join(inc_dir, 'random', 'distributions.h')) as fid:
+ s = []
+ in_skip = 0
+ for line in fid:
+ # massage the include file
+ if line.strip().startswith('#'):
+ continue
+
+ # skip any inlined function definition
+ # which starts with 'static NPY_INLINE xxx(...) {'
+ # and ends with a closing '}'
+ if line.strip().startswith('static NPY_INLINE'):
+ in_skip += line.count('{')
+ continue
+ elif in_skip > 0:
+ in_skip += line.count('{')
+ in_skip -= line.count('}')
+ continue
+
+ # replace defines with their value or remove them
+ line = line.replace('DECLDIR', '')
+ line = line.replace('NPY_INLINE', '')
+ line = line.replace('RAND_INT_TYPE', 'int64_t')
+ s.append(line)
+ ffi.cdef('\n'.join(s))
+
diff --git a/numpy/random/examples/cython/extending.pyx b/numpy/random/_examples/cython/extending.pyx
index a6a4ba4bf..3a7f81aa0 100644
--- a/numpy/random/examples/cython/extending.pyx
+++ b/numpy/random/_examples/cython/extending.pyx
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
#cython: language_level=3
from libc.stdint cimport uint32_t
@@ -8,7 +8,7 @@ import numpy as np
cimport numpy as np
cimport cython
-from numpy.random.common cimport bitgen_t
+from numpy.random cimport bitgen_t
from numpy.random import PCG64
np.import_array()
@@ -39,7 +39,7 @@ def uniform_mean(Py_ssize_t n):
return randoms.mean()
-# This function is declated nogil so it can be used without the GIL below
+# This function is declared nogil so it can be used without the GIL below
cdef uint32_t bounded_uint(uint32_t lb, uint32_t ub, bitgen_t *rng) nogil:
cdef uint32_t mask, delta, val
mask = delta = ub - lb
diff --git a/numpy/random/_examples/cython/extending_distributions.pyx b/numpy/random/_examples/cython/extending_distributions.pyx
new file mode 100644
index 000000000..d908e92d0
--- /dev/null
+++ b/numpy/random/_examples/cython/extending_distributions.pyx
@@ -0,0 +1,117 @@
+#!/usr/bin/env python3
+#cython: language_level=3
+"""
+This file shows how to use a BitGenerator to create a distribution.
+"""
+import numpy as np
+cimport numpy as np
+cimport cython
+from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer
+from libc.stdint cimport uint16_t, uint64_t
+from numpy.random cimport bitgen_t
+from numpy.random import PCG64
+from numpy.random.c_distributions cimport (
+ random_standard_uniform_fill, random_standard_uniform_fill_f)
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def uniforms(Py_ssize_t n):
+ """
+ Create an array of `n` uniformly distributed doubles.
+ A 'real' distribution would want to process the values into
+ some non-uniform distribution
+ """
+ cdef Py_ssize_t i
+ cdef bitgen_t *rng
+ cdef const char *capsule_name = "BitGenerator"
+ cdef double[::1] random_values
+
+ x = PCG64()
+ capsule = x.capsule
+ # Optional check that the capsule is from a BitGenerator
+ if not PyCapsule_IsValid(capsule, capsule_name):
+ raise ValueError("Invalid pointer to anon_func_state")
+ # Cast the pointer
+ rng = <bitgen_t *> PyCapsule_GetPointer(capsule, capsule_name)
+ random_values = np.empty(n, dtype='float64')
+ with x.lock, nogil:
+ for i in range(n):
+ # Call the function
+ random_values[i] = rng.next_double(rng.state)
+ randoms = np.asarray(random_values)
+
+ return randoms
+
+# cython example 2
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def uint10_uniforms(Py_ssize_t n):
+ """Uniform 10 bit integers stored as 16-bit unsigned integers"""
+ cdef Py_ssize_t i
+ cdef bitgen_t *rng
+ cdef const char *capsule_name = "BitGenerator"
+ cdef uint16_t[::1] random_values
+ cdef int bits_remaining
+ cdef int width = 10
+ cdef uint64_t buff, mask = 0x3FF
+
+ x = PCG64()
+ capsule = x.capsule
+ if not PyCapsule_IsValid(capsule, capsule_name):
+ raise ValueError("Invalid pointer to anon_func_state")
+ rng = <bitgen_t *> PyCapsule_GetPointer(capsule, capsule_name)
+ random_values = np.empty(n, dtype='uint16')
+ # Best practice is to release GIL and acquire the lock
+ bits_remaining = 0
+ with x.lock, nogil:
+ for i in range(n):
+ if bits_remaining < width:
+ buff = rng.next_uint64(rng.state)
+ random_values[i] = buff & mask
+ buff >>= width
+
+ randoms = np.asarray(random_values)
+ return randoms
+
+# cython example 3
+def uniforms_ex(bit_generator, Py_ssize_t n, dtype=np.float64):
+ """
+ Create an array of `n` uniformly distributed doubles via a "fill" function.
+
+ A 'real' distribution would want to process the values into
+ some non-uniform distribution
+
+ Parameters
+ ----------
+ bit_generator: BitGenerator instance
+ n: int
+ Output vector length
+ dtype: {str, dtype}, optional
+ Desired dtype, either 'd' (or 'float64') or 'f' (or 'float32'). The
+ default dtype value is 'd'
+ """
+ cdef Py_ssize_t i
+ cdef bitgen_t *rng
+ cdef const char *capsule_name = "BitGenerator"
+ cdef np.ndarray randoms
+
+ capsule = bit_generator.capsule
+ # Optional check that the capsule is from a BitGenerator
+ if not PyCapsule_IsValid(capsule, capsule_name):
+ raise ValueError("Invalid pointer to anon_func_state")
+ # Cast the pointer
+ rng = <bitgen_t *> PyCapsule_GetPointer(capsule, capsule_name)
+
+ _dtype = np.dtype(dtype)
+ randoms = np.empty(n, dtype=_dtype)
+ if _dtype == np.float32:
+ with bit_generator.lock:
+ random_standard_uniform_fill_f(rng, n, <float*>np.PyArray_DATA(randoms))
+ elif _dtype == np.float64:
+ with bit_generator.lock:
+ random_standard_uniform_fill(rng, n, <double*>np.PyArray_DATA(randoms))
+ else:
+ raise TypeError('Unsupported dtype %r for random' % _dtype)
+ return randoms
+
diff --git a/numpy/random/_examples/cython/setup.py b/numpy/random/_examples/cython/setup.py
new file mode 100644
index 000000000..42425c2c1
--- /dev/null
+++ b/numpy/random/_examples/cython/setup.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python3
+"""
+Build the Cython demonstrations of low-level access to NumPy random
+
+Usage: python setup.py build_ext -i
+"""
+
+import numpy as np
+from distutils.core import setup
+from Cython.Build import cythonize
+from setuptools.extension import Extension
+from os.path import join, dirname
+
+path = dirname(__file__)
+src_dir = join(dirname(path), '..', 'src')
+defs = [('NPY_NO_DEPRECATED_API', 0)]
+inc_path = np.get_include()
+# not so nice. We need the random/lib library from numpy
+lib_path = join(np.get_include(), '..', '..', 'random', 'lib')
+
+extending = Extension("extending",
+ sources=[join(path, 'extending.pyx')],
+ include_dirs=[
+ np.get_include(),
+ join(path, '..', '..')
+ ],
+ define_macros=defs,
+ )
+distributions = Extension("extending_distributions",
+ sources=[join(path, 'extending_distributions.pyx')],
+ include_dirs=[inc_path],
+ library_dirs=[lib_path],
+ libraries=['npyrandom'],
+ define_macros=defs,
+ )
+
+extensions = [extending, distributions]
+
+setup(
+ ext_modules=cythonize(extensions)
+)
diff --git a/numpy/random/_examples/numba/extending.py b/numpy/random/_examples/numba/extending.py
new file mode 100644
index 000000000..0d240596b
--- /dev/null
+++ b/numpy/random/_examples/numba/extending.py
@@ -0,0 +1,84 @@
+import numpy as np
+import numba as nb
+
+from numpy.random import PCG64
+from timeit import timeit
+
+bit_gen = PCG64()
+next_d = bit_gen.cffi.next_double
+state_addr = bit_gen.cffi.state_address
+
+def normals(n, state):
+ out = np.empty(n)
+ for i in range((n + 1) // 2):
+ x1 = 2.0 * next_d(state) - 1.0
+ x2 = 2.0 * next_d(state) - 1.0
+ r2 = x1 * x1 + x2 * x2
+ while r2 >= 1.0 or r2 == 0.0:
+ x1 = 2.0 * next_d(state) - 1.0
+ x2 = 2.0 * next_d(state) - 1.0
+ r2 = x1 * x1 + x2 * x2
+ f = np.sqrt(-2.0 * np.log(r2) / r2)
+ out[2 * i] = f * x1
+ if 2 * i + 1 < n:
+ out[2 * i + 1] = f * x2
+ return out
+
+# Compile using Numba
+normalsj = nb.jit(normals, nopython=True)
+# Must use state address not state with numba
+n = 10000
+
+def numbacall():
+ return normalsj(n, state_addr)
+
+rg = np.random.Generator(PCG64())
+
+def numpycall():
+ return rg.normal(size=n)
+
+# Check that the functions work
+r1 = numbacall()
+r2 = numpycall()
+assert r1.shape == (n,)
+assert r1.shape == r2.shape
+
+t1 = timeit(numbacall, number=1000)
+print('{:.2f} secs for {} PCG64 (Numba/PCG64) gaussian randoms'.format(t1, n))
+t2 = timeit(numpycall, number=1000)
+print('{:.2f} secs for {} PCG64 (NumPy/PCG64) gaussian randoms'.format(t2, n))
+
+# example 2
+
+next_u32 = bit_gen.ctypes.next_uint32
+ctypes_state = bit_gen.ctypes.state
+
+@nb.jit(nopython=True)
+def bounded_uint(lb, ub, state):
+ mask = delta = ub - lb
+ mask |= mask >> 1
+ mask |= mask >> 2
+ mask |= mask >> 4
+ mask |= mask >> 8
+ mask |= mask >> 16
+
+ val = next_u32(state) & mask
+ while val > delta:
+ val = next_u32(state) & mask
+
+ return lb + val
+
+
+print(bounded_uint(323, 2394691, ctypes_state.value))
+
+
+@nb.jit(nopython=True)
+def bounded_uints(lb, ub, n, state):
+ out = np.empty(n, dtype=np.uint32)
+ for i in range(n):
+ out[i] = bounded_uint(lb, ub, state)
+
+
+bounded_uints(323, 2394691, 10000000, ctypes_state.value)
+
+
diff --git a/numpy/random/examples/numba/extending_distributions.py b/numpy/random/_examples/numba/extending_distributions.py
index 9233ccced..7cf8bf0b0 100644
--- a/numpy/random/examples/numba/extending_distributions.py
+++ b/numpy/random/_examples/numba/extending_distributions.py
@@ -1,22 +1,28 @@
r"""
-On *nix, execute in randomgen/src/distributions
+Building the required library in this example requires a source distribution
+of NumPy or clone of the NumPy git repository since distributions.c is not
+included in binary distributions.
+On *nix, execute in numpy/random/src/distributions
+
+export PYTHON_VERSION=3.8 # Python version
export PYTHON_INCLUDE=#path to Python's include folder, usually \
${PYTHON_HOME}/include/python${PYTHON_VERSION}m
export NUMPY_INCLUDE=#path to numpy's include folder, usually \
${PYTHON_HOME}/lib/python${PYTHON_VERSION}/site-packages/numpy/core/include
gcc -shared -o libdistributions.so -fPIC distributions.c \
-I${NUMPY_INCLUDE} -I${PYTHON_INCLUDE}
-mv libdistributions.so ../../examples/numba/
+mv libdistributions.so ../../_examples/numba/
On Windows
-rem PYTHON_HOME is setup dependent, this is an example
+rem PYTHON_HOME and PYTHON_VERSION are setup dependent, this is an example
set PYTHON_HOME=c:\Anaconda
+set PYTHON_VERSION=38
cl.exe /LD .\distributions.c -DDLL_EXPORT \
-I%PYTHON_HOME%\lib\site-packages\numpy\core\include \
- -I%PYTHON_HOME%\include %PYTHON_HOME%\libs\python36.lib
-move distributions.dll ../../examples/numba/
+ -I%PYTHON_HOME%\include %PYTHON_HOME%\libs\python%PYTHON_VERSION%.lib
+move distributions.dll ../../_examples/numba/
"""
import os
@@ -35,19 +41,19 @@ else:
raise RuntimeError('Required DLL/so file was not found.')
ffi.cdef("""
-double random_gauss_zig(void *bitgen_state);
+double random_standard_normal(void *bitgen_state);
""")
x = PCG64()
xffi = x.cffi
bit_generator = xffi.bit_generator
-random_gauss_zig = lib.random_gauss_zig
+random_standard_normal = lib.random_standard_normal
def normals(n, bit_generator):
out = np.empty(n)
for i in range(n):
- out[i] = random_gauss_zig(bit_generator)
+ out[i] = random_standard_normal(bit_generator)
return out
diff --git a/numpy/random/generator.pyx b/numpy/random/_generator.pyx
index df7485a97..dc0ec83f5 100644
--- a/numpy/random/generator.pyx
+++ b/numpy/random/_generator.pyx
@@ -3,36 +3,48 @@
import operator
import warnings
-import numpy as np
-from numpy.core.multiarray import normalize_axis_index
-
-from .bounded_integers import _integers_types
-from .pcg64 import PCG64
-
from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer
from cpython cimport (Py_INCREF, PyFloat_AsDouble)
-from libc cimport string
cimport cython
+import numpy as np
cimport numpy as np
+from numpy.core.multiarray import normalize_axis_index
-from .bounded_integers cimport *
-from .common cimport *
-from .distributions cimport *
+from .c_distributions cimport *
+from libc cimport string
+from libc.stdint cimport (uint8_t, uint16_t, uint32_t, uint64_t,
+ int32_t, int64_t, INT64_MAX, SIZE_MAX)
+from ._bounded_integers cimport (_rand_bool, _rand_int32, _rand_int64,
+ _rand_int16, _rand_int8, _rand_uint64, _rand_uint32, _rand_uint16,
+ _rand_uint8, _gen_mask)
+from ._pcg64 import PCG64
+from numpy.random cimport bitgen_t
+from ._common cimport (POISSON_LAM_MAX, CONS_POSITIVE, CONS_NONE,
+ CONS_NON_NEGATIVE, CONS_BOUNDED_0_1, CONS_BOUNDED_GT_0_1,
+ CONS_GT_1, CONS_POSITIVE_NOT_NAN, CONS_POISSON,
+ double_fill, cont, kahan_sum, cont_broadcast_3, float_fill, cont_f,
+ check_array_constraint, check_constraint, disc, discrete_broadcast_iii,
+ )
+np.import_array()
-__all__ = ['Generator', 'beta', 'binomial', 'bytes', 'chisquare', 'choice',
- 'dirichlet', 'exponential', 'f', 'gamma',
- 'geometric', 'gumbel', 'hypergeometric', 'integers', 'laplace',
- 'logistic', 'lognormal', 'logseries', 'multinomial',
- 'multivariate_normal', 'negative_binomial', 'noncentral_chisquare',
- 'noncentral_f', 'normal', 'pareto', 'permutation',
- 'poisson', 'power', 'random', 'rayleigh', 'shuffle',
- 'standard_cauchy', 'standard_exponential', 'standard_gamma',
- 'standard_normal', 'standard_t', 'triangular',
- 'uniform', 'vonmises', 'wald', 'weibull', 'zipf']
+cdef int64_t _safe_sum_nonneg_int64(size_t num_colors, int64_t *colors):
+ """
+ Sum the values in the array `colors`.
-np.import_array()
+ Return -1 if an overflow occurs.
+ The values in *colors are assumed to be nonnegative.
+ """
+ cdef size_t i
+ cdef int64_t sum
+
+ sum = 0
+ for i in range(num_colors):
+ if colors[i] > INT64_MAX - sum:
+ return -1
+ sum += colors[i]
+ return sum
cdef bint _check_bit_generator(object bitgen):
@@ -103,7 +115,7 @@ cdef class Generator:
capsule = bit_generator.capsule
cdef const char *name = "BitGenerator"
if not PyCapsule_IsValid(capsule, name):
- raise ValueError("Invalid bit generator'. The bit generator must "
+ raise ValueError("Invalid bit generator. The bit generator must "
"be instantiated.")
self._bitgen = (<bitgen_t *> PyCapsule_GetPointer(capsule, name))[0]
self.lock = bit_generator.lock
@@ -141,7 +153,7 @@ cdef class Generator:
def random(self, size=None, dtype=np.float64, out=None):
"""
- random(size=None, dtype='d', out=None)
+ random(size=None, dtype=np.float64, out=None)
Return random floats in the half-open interval [0.0, 1.0).
@@ -157,10 +169,9 @@ cdef class Generator:
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
- dtype : {str, dtype}, optional
- Desired dtype of the result, either 'd' (or 'float64') or 'f'
- (or 'float32'). All dtypes are determined by their name. The
- default value is 'd'.
+ dtype : dtype, optional
+ Desired dtype of the result, only `float64` and `float32` are supported.
+ Byteorder must be native. The default value is np.float64.
out : ndarray, optional
Alternative output array in which to place the result. If size is not None,
it must have the same shape as the provided size and must match the type of
@@ -191,13 +202,13 @@ cdef class Generator:
"""
cdef double temp
- key = np.dtype(dtype).name
- if key == 'float64':
- return double_fill(&random_double_fill, &self._bitgen, size, self.lock, out)
- elif key == 'float32':
- return float_fill(&random_float, &self._bitgen, size, self.lock, out)
+ _dtype = np.dtype(dtype)
+ if _dtype == np.float64:
+ return double_fill(&random_standard_uniform_fill, &self._bitgen, size, self.lock, out)
+ elif _dtype == np.float32:
+ return float_fill(&random_standard_uniform_fill_f, &self._bitgen, size, self.lock, out)
else:
- raise TypeError('Unsupported dtype "%s" for random' % key)
+ raise TypeError('Unsupported dtype %r for random' % _dtype)
def beta(self, a, b, size=None):
"""
@@ -296,7 +307,7 @@ cdef class Generator:
def standard_exponential(self, size=None, dtype=np.float64, method=u'zig', out=None):
"""
- standard_exponential(size=None, dtype='d', method='zig', out=None)
+ standard_exponential(size=None, dtype=np.float64, method='zig', out=None)
Draw samples from the standard exponential distribution.
@@ -310,9 +321,8 @@ cdef class Generator:
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
dtype : dtype, optional
- Desired dtype of the result, either 'd' (or 'float64') or 'f'
- (or 'float32'). All dtypes are determined by their name. The
- default value is 'd'.
+ Desired dtype of the result, only `float64` and `float32` are supported.
+ Byteorder must be native. The default value is np.float64.
method : str, optional
Either 'inv' or 'zig'. 'inv' uses the default inverse CDF method.
'zig' uses the much faster Ziggurat method of Marsaglia and Tsang.
@@ -333,24 +343,24 @@ cdef class Generator:
>>> n = np.random.default_rng().standard_exponential((3, 8000))
"""
- key = np.dtype(dtype).name
- if key == 'float64':
+ _dtype = np.dtype(dtype)
+ if _dtype == np.float64:
if method == u'zig':
- return double_fill(&random_standard_exponential_zig_fill, &self._bitgen, size, self.lock, out)
- else:
return double_fill(&random_standard_exponential_fill, &self._bitgen, size, self.lock, out)
- elif key == 'float32':
+ else:
+ return double_fill(&random_standard_exponential_inv_fill, &self._bitgen, size, self.lock, out)
+ elif _dtype == np.float32:
if method == u'zig':
- return float_fill(&random_standard_exponential_zig_f, &self._bitgen, size, self.lock, out)
+ return float_fill(&random_standard_exponential_fill_f, &self._bitgen, size, self.lock, out)
else:
- return float_fill(&random_standard_exponential_f, &self._bitgen, size, self.lock, out)
+ return float_fill(&random_standard_exponential_inv_fill_f, &self._bitgen, size, self.lock, out)
else:
- raise TypeError('Unsupported dtype "%s" for standard_exponential'
- % key)
+ raise TypeError('Unsupported dtype %r for standard_exponential'
+ % _dtype)
def integers(self, low, high=None, size=None, dtype=np.int64, endpoint=False):
"""
- integers(low, high=None, size=None, dtype='int64', endpoint=False)
+ integers(low, high=None, size=None, dtype=np.int64, endpoint=False)
Return random integers from `low` (inclusive) to `high` (exclusive), or
if endpoint=True, `low` (inclusive) to `high` (inclusive). Replaces
@@ -375,11 +385,9 @@ cdef class Generator:
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
- dtype : {str, dtype}, optional
- Desired dtype of the result. All dtypes are determined by their
- name, i.e., 'int64', 'int', etc, so byteorder is not available
- and a specific precision may have different C types depending
- on the platform. The default value is 'np.int'.
+ dtype : dtype, optional
+ Desired dtype of the result. Byteorder must be native.
+ The default value is np.int64.
endpoint : bool, optional
If true, sample from the interval [low, high] instead of the
default [low, high)
@@ -438,41 +446,41 @@ cdef class Generator:
high = low
low = 0
- dt = np.dtype(dtype)
- key = dt.name
- if key not in _integers_types:
- raise TypeError('Unsupported dtype "%s" for integers' % key)
- if not dt.isnative:
- raise ValueError('Providing a dtype with a non-native byteorder '
- 'is not supported. If you require '
- 'platform-independent byteorder, call byteswap '
- 'when required.')
+ _dtype = np.dtype(dtype)
# Implementation detail: the old API used a masked method to generate
# bounded uniform integers. Lemire's method is preferable since it is
# faster. randomgen allows a choice, we will always use the faster one.
cdef bint _masked = False
- if key == 'int32':
+ if _dtype == np.int32:
ret = _rand_int32(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
- elif key == 'int64':
+ elif _dtype == np.int64:
ret = _rand_int64(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
- elif key == 'int16':
+ elif _dtype == np.int16:
ret = _rand_int16(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
- elif key == 'int8':
+ elif _dtype == np.int8:
ret = _rand_int8(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
- elif key == 'uint64':
+ elif _dtype == np.uint64:
ret = _rand_uint64(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
- elif key == 'uint32':
+ elif _dtype == np.uint32:
ret = _rand_uint32(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
- elif key == 'uint16':
+ elif _dtype == np.uint16:
ret = _rand_uint16(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
- elif key == 'uint8':
+ elif _dtype == np.uint8:
ret = _rand_uint8(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
- elif key == 'bool':
+ elif _dtype == np.bool_:
ret = _rand_bool(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
+ elif not _dtype.isnative:
+ raise ValueError('Providing a dtype with a non-native byteorder '
+ 'is not supported. If you require '
+ 'platform-independent byteorder, call byteswap '
+ 'when required.')
+ else:
+ raise TypeError('Unsupported dtype %r for integers' % _dtype)
+
- if size is None and dtype in (np.bool, np.int, np.long):
+ if size is None and dtype in (bool, int, np.compat.long):
if np.array(ret).shape == ():
return dtype(ret)
return ret
@@ -508,32 +516,32 @@ cdef class Generator:
@cython.wraparound(True)
def choice(self, a, size=None, replace=True, p=None, axis=0, bint shuffle=True):
"""
- choice(a, size=None, replace=True, p=None, axis=0):
+ choice(a, size=None, replace=True, p=None, axis=0, shuffle=True):
Generates a random sample from a given 1-D array
Parameters
----------
- a : 1-D array-like or int
+ a : {array_like, int}
If an ndarray, a random sample is generated from its elements.
- If an int, the random sample is generated as if a were np.arange(a)
- size : int or tuple of ints, optional
+ If an int, the random sample is generated from np.arange(a).
+ size : {int, tuple[int]}, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn from the 1-d `a`. If `a` has more
than one dimension, the `size` shape will be inserted into the
`axis` dimension, so the output ``ndim`` will be ``a.ndim - 1 +
len(size)``. Default is None, in which case a single value is
returned.
- replace : boolean, optional
+ replace : bool, optional
Whether the sample is with or without replacement
- p : 1-D array-like, optional
+ p : 1-D array_like, optional
The probabilities associated with each entry in a.
If not given the sample assumes a uniform distribution over all
entries in a.
axis : int, optional
The axis along which the selection is performed. The default, 0,
selects by row.
- shuffle : boolean, optional
+ shuffle : bool, optional
Whether the sample is shuffled when sampling without replacement.
Default is True, False provides a speedup.
@@ -604,13 +612,15 @@ cdef class Generator:
# __index__ must return an integer by python rules.
pop_size = operator.index(a.item())
except TypeError:
- raise ValueError("a must be 1-dimensional or an integer")
+ raise ValueError("a must an array or an integer")
if pop_size <= 0 and np.prod(size) != 0:
- raise ValueError("a must be greater than 0 unless no samples are taken")
+ raise ValueError("a must be a positive integer unless no"
+ "samples are taken")
else:
pop_size = a.shape[axis]
if pop_size == 0 and np.prod(size) != 0:
- raise ValueError("'a' cannot be empty unless no samples are taken")
+ raise ValueError("a cannot be empty unless no samples are"
+ "taken")
if p is not None:
d = len(p)
@@ -625,9 +635,9 @@ cdef class Generator:
pix = <double*>np.PyArray_DATA(p)
if p.ndim != 1:
- raise ValueError("'p' must be 1-dimensional")
+ raise ValueError("p must be 1-dimensional")
if p.size != pop_size:
- raise ValueError("'a' and 'p' must have same size")
+ raise ValueError("a and p must have same size")
p_sum = kahan_sum(pix, d)
if np.isnan(p_sum):
raise ValueError("probabilities contain NaN")
@@ -649,13 +659,14 @@ cdef class Generator:
cdf /= cdf[-1]
uniform_samples = self.random(shape)
idx = cdf.searchsorted(uniform_samples, side='right')
- idx = np.array(idx, copy=False, dtype=np.int64) # searchsorted returns a scalar
+ # searchsorted returns a scalar
+ idx = np.array(idx, copy=False, dtype=np.int64)
else:
idx = self.integers(0, pop_size, size=shape, dtype=np.int64)
else:
if size > pop_size:
raise ValueError("Cannot take a larger sample than "
- "population when 'replace=False'")
+ "population when replace is False")
elif size < 0:
raise ValueError("negative dimensions are not allowed")
@@ -781,7 +792,6 @@ cdef class Generator:
--------
integers : Discrete uniform distribution, yielding integers.
random : Floats uniformly distributed over ``[0, 1)``.
- random : Alias for `random`.
Notes
-----
@@ -857,7 +867,7 @@ cdef class Generator:
# Complicated, continuous distributions:
def standard_normal(self, size=None, dtype=np.float64, out=None):
"""
- standard_normal(size=None, dtype='d', out=None)
+ standard_normal(size=None, dtype=np.float64, out=None)
Draw samples from a standard Normal distribution (mean=0, stdev=1).
@@ -867,10 +877,9 @@ cdef class Generator:
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
- dtype : {str, dtype}, optional
- Desired dtype of the result, either 'd' (or 'float64') or 'f'
- (or 'float32'). All dtypes are determined by their name. The
- default value is 'd'.
+ dtype : dtype, optional
+ Desired dtype of the result, only `float64` and `float32` are supported.
+ Byteorder must be native. The default value is np.float64.
out : ndarray, optional
Alternative output array in which to place the result. If size is not None,
it must have the same shape as the provided size and must match the type of
@@ -882,6 +891,12 @@ cdef class Generator:
A floating-point array of shape ``size`` of drawn samples, or a
single sample if ``size`` was not specified.
+ See Also
+ --------
+ normal :
+ Equivalent function with additional ``loc`` and ``scale`` arguments
+ for setting the mean and standard deviation.
+
Notes
-----
For random samples from :math:`N(\\mu, \\sigma^2)`, use one of::
@@ -889,12 +904,6 @@ cdef class Generator:
mu + sigma * gen.standard_normal(size=...)
gen.normal(mu, sigma, size=...)
- See Also
- --------
- normal :
- Equivalent function with additional ``loc`` and ``scale`` arguments
- for setting the mean and standard deviation.
-
Examples
--------
>>> rng = np.random.default_rng()
@@ -918,14 +927,13 @@ cdef class Generator:
[ 0.39924804, 4.68456316, 4.99394529, 4.84057254]]) # random
"""
- key = np.dtype(dtype).name
- if key == 'float64':
- return double_fill(&random_gauss_zig_fill, &self._bitgen, size, self.lock, out)
- elif key == 'float32':
- return float_fill(&random_gauss_zig_f, &self._bitgen, size, self.lock, out)
-
+ _dtype = np.dtype(dtype)
+ if _dtype == np.float64:
+ return double_fill(&random_standard_normal_fill, &self._bitgen, size, self.lock, out)
+ elif _dtype == np.float32:
+ return float_fill(&random_standard_normal_fill_f, &self._bitgen, size, self.lock, out)
else:
- raise TypeError('Unsupported dtype "%s" for standard_normal' % key)
+ raise TypeError('Unsupported dtype %r for standard_normal' % _dtype)
def normal(self, loc=0.0, scale=1.0, size=None):
"""
@@ -1023,7 +1031,7 @@ cdef class Generator:
[ 0.39924804, 4.68456316, 4.99394529, 4.84057254]]) # random
"""
- return cont(&random_normal_zig, &self._bitgen, size, self.lock, 2,
+ return cont(&random_normal, &self._bitgen, size, self.lock, 2,
loc, '', CONS_NONE,
scale, 'scale', CONS_NON_NEGATIVE,
0.0, '', CONS_NONE,
@@ -1031,7 +1039,7 @@ cdef class Generator:
def standard_gamma(self, shape, size=None, dtype=np.float64, out=None):
"""
- standard_gamma(shape, size=None, dtype='d', out=None)
+ standard_gamma(shape, size=None, dtype=np.float64, out=None)
Draw samples from a standard Gamma distribution.
@@ -1047,10 +1055,9 @@ cdef class Generator:
``m * n * k`` samples are drawn. If size is ``None`` (default),
a single value is returned if ``shape`` is a scalar. Otherwise,
``np.array(shape).size`` samples are drawn.
- dtype : {str, dtype}, optional
- Desired dtype of the result, either 'd' (or 'float64') or 'f'
- (or 'float32'). All dtypes are determined by their name. The
- default value is 'd'.
+ dtype : dtype, optional
+ Desired dtype of the result, only `float64` and `float32` are supported.
+ Byteorder must be native. The default value is np.float64.
out : ndarray, optional
Alternative output array in which to place the result. If size is
not None, it must have the same shape as the provided size and
@@ -1107,19 +1114,19 @@ cdef class Generator:
"""
cdef void *func
- key = np.dtype(dtype).name
- if key == 'float64':
- return cont(&random_standard_gamma_zig, &self._bitgen, size, self.lock, 1,
+ _dtype = np.dtype(dtype)
+ if _dtype == np.float64:
+ return cont(&random_standard_gamma, &self._bitgen, size, self.lock, 1,
shape, 'shape', CONS_NON_NEGATIVE,
0.0, '', CONS_NONE,
0.0, '', CONS_NONE,
out)
- if key == 'float32':
- return cont_f(&random_standard_gamma_zig_f, &self._bitgen, size, self.lock,
+ if _dtype == np.float32:
+ return cont_f(&random_standard_gamma_f, &self._bitgen, size, self.lock,
shape, 'shape', CONS_NON_NEGATIVE,
out)
else:
- raise TypeError('Unsupported dtype "%s" for standard_gamma' % key)
+ raise TypeError('Unsupported dtype %r for standard_gamma' % _dtype)
def gamma(self, shape, scale=1.0, size=None):
"""
@@ -2796,7 +2803,6 @@ cdef class Generator:
it = np.PyArray_MultiIterNew2(p_arr, n_arr)
randoms = <np.ndarray>np.empty(it.shape, np.int64)
- randoms_data = <np.int64_t *>np.PyArray_DATA(randoms)
cnt = np.PyArray_SIZE(randoms)
it = np.PyArray_MultiIterNew3(randoms, p_arr, n_arr)
@@ -2838,14 +2844,14 @@ cdef class Generator:
Samples are drawn from a negative binomial distribution with specified
parameters, `n` successes and `p` probability of success where `n`
- is > 0 and `p` is in the interval [0, 1].
+ is > 0 and `p` is in the interval (0, 1].
Parameters
----------
n : float or array_like of floats
Parameter of the distribution, > 0.
p : float or array_like of floats
- Parameter of the distribution, >= 0 and <=1.
+ Parameter of the distribution. Must satisfy 0 < p <= 1.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
@@ -2903,7 +2909,7 @@ cdef class Generator:
"""
return disc(&random_negative_binomial, &self._bitgen, size, self.lock, 2, 0,
n, 'n', CONS_POSITIVE_NOT_NAN,
- p, 'p', CONS_BOUNDED_0_1,
+ p, 'p', CONS_BOUNDED_GT_0_1,
0.0, '', CONS_NONE)
def poisson(self, lam=1.0, size=None):
@@ -3147,6 +3153,8 @@ cdef class Generator:
See Also
--------
+ multivariate_hypergeometric : Draw samples from the multivariate
+ hypergeometric distribution.
scipy.stats.hypergeom : probability density function, distribution or
cumulative density function, etc.
@@ -3332,7 +3340,7 @@ cdef class Generator:
# Multivariate distributions:
def multivariate_normal(self, mean, cov, size=None, check_valid='warn',
- tol=1e-8):
+ tol=1e-8, *, method='svd'):
"""
multivariate_normal(mean, cov, size=None, check_valid='warn', tol=1e-8)
@@ -3362,6 +3370,15 @@ cdef class Generator:
tol : float, optional
Tolerance when checking the singular values in covariance matrix.
cov is cast to double before the check.
+ method : { 'svd', 'eigh', 'cholesky'}, optional
+ The cov input is used to compute a factor matrix A such that
+ ``A @ A.T = cov``. This argument is used to select the method
+ used to compute the factor matrix A. The default method 'svd' is
+ the slowest, while 'cholesky' is the fastest but less robust than
+ the slowest method. The method `eigh` uses eigen decomposition to
+ compute A and is faster than svd but slower than cholesky.
+
+ .. versionadded:: 1.18.0
Returns
-------
@@ -3422,10 +3439,16 @@ cdef class Generator:
--------
>>> mean = (1, 2)
>>> cov = [[1, 0], [0, 1]]
- >>> x = np.random.default_rng().multivariate_normal(mean, cov, (3, 3))
+ >>> rng = np.random.default_rng()
+ >>> x = rng.multivariate_normal(mean, cov, (3, 3))
>>> x.shape
(3, 3, 2)
+        We can use a different method than the default to factorize cov:
+ >>> y = rng.multivariate_normal(mean, cov, (3, 3), method='cholesky')
+ >>> y.shape
+ (3, 3, 2)
+
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
@@ -3433,7 +3456,9 @@ cdef class Generator:
[True, True] # random
"""
- from numpy.dual import svd
+ if method not in {'eigh', 'svd', 'cholesky'}:
+ raise ValueError(
+ "method must be one of {'eigh', 'svd', 'cholesky'}")
# Check preconditions on arguments
mean = np.array(mean)
@@ -3476,13 +3501,27 @@ cdef class Generator:
# GH10839, ensure double to make tol meaningful
cov = cov.astype(np.double)
- (u, s, v) = svd(cov)
+ if method == 'svd':
+ from numpy.dual import svd
+ (u, s, vh) = svd(cov)
+ elif method == 'eigh':
+ from numpy.dual import eigh
+ # could call linalg.svd(hermitian=True), but that calculates a vh we don't need
+ (s, u) = eigh(cov)
+ else:
+ from numpy.dual import cholesky
+ l = cholesky(cov)
- if check_valid != 'ignore':
+        # make sure check_valid is ignored when method == 'cholesky'
+ # since the decomposition will have failed if cov is not valid.
+ if check_valid != 'ignore' and method != 'cholesky':
if check_valid != 'warn' and check_valid != 'raise':
- raise ValueError("check_valid must equal 'warn', 'raise', or 'ignore'")
-
- psd = np.allclose(np.dot(v.T * s, v), cov, rtol=tol, atol=tol)
+ raise ValueError(
+ "check_valid must equal 'warn', 'raise', or 'ignore'")
+ if method == 'svd':
+ psd = np.allclose(np.dot(vh.T * s, vh), cov, rtol=tol, atol=tol)
+ else:
+ psd = not np.any(s < -tol)
if not psd:
if check_valid == 'warn':
warnings.warn("covariance is not positive-semidefinite.",
@@ -3490,8 +3529,17 @@ cdef class Generator:
else:
raise ValueError("covariance is not positive-semidefinite.")
- x = np.dot(x, np.sqrt(s)[:, None] * v)
- x += mean
+ if method == 'cholesky':
+ _factor = l
+ elif method == 'eigh':
+ # if check_valid == 'ignore' we need to ensure that np.sqrt does not
+ # return a NaN if s is a very small negative number that is
+ # approximately zero or when the covariance is not positive-semidefinite
+ _factor = u * np.sqrt(abs(s))
+ else:
+ _factor = u * np.sqrt(s)
+
+ x = mean + x @ _factor.T
x.shape = tuple(final_shape)
return x
@@ -3594,8 +3642,8 @@ cdef class Generator:
d = len(pvals)
on = <np.ndarray>np.PyArray_FROM_OTF(n, np.NPY_INT64, np.NPY_ALIGNED)
- parr = <np.ndarray>np.PyArray_FROM_OTF(
- pvals, np.NPY_DOUBLE, np.NPY_ALIGNED | np.NPY_ARRAY_C_CONTIGUOUS)
+ parr = <np.ndarray>np.PyArray_FROMANY(
+ pvals, np.NPY_DOUBLE, 1, 1, np.NPY_ARRAY_ALIGNED | np.NPY_ARRAY_C_CONTIGUOUS)
pix = <double*>np.PyArray_DATA(parr)
check_array_constraint(parr, 'pvals', CONS_BOUNDED_0_1)
if kahan_sum(pix, d-1) > (1.0 + 1e-12):
@@ -3645,6 +3693,222 @@ cdef class Generator:
return multin
+ def multivariate_hypergeometric(self, object colors, object nsample,
+ size=None, method='marginals'):
+ """
+ multivariate_hypergeometric(colors, nsample, size=None,
+ method='marginals')
+
+ Generate variates from a multivariate hypergeometric distribution.
+
+ The multivariate hypergeometric distribution is a generalization
+ of the hypergeometric distribution.
+
+ Choose ``nsample`` items at random without replacement from a
+ collection with ``N`` distinct types. ``N`` is the length of
+ ``colors``, and the values in ``colors`` are the number of occurrences
+ of that type in the collection. The total number of items in the
+ collection is ``sum(colors)``. Each random variate generated by this
+ function is a vector of length ``N`` holding the counts of the
+ different types that occurred in the ``nsample`` items.
+
+ The name ``colors`` comes from a common description of the
+ distribution: it is the probability distribution of the number of
+ marbles of each color selected without replacement from an urn
+ containing marbles of different colors; ``colors[i]`` is the number
+ of marbles in the urn with color ``i``.
+
+ Parameters
+ ----------
+ colors : sequence of integers
+ The number of each type of item in the collection from which
+ a sample is drawn. The values in ``colors`` must be nonnegative.
+ To avoid loss of precision in the algorithm, ``sum(colors)``
+ must be less than ``10**9`` when `method` is "marginals".
+ nsample : int
+ The number of items selected. ``nsample`` must not be greater
+ than ``sum(colors)``.
+ size : int or tuple of ints, optional
+ The number of variates to generate, either an integer or a tuple
+ holding the shape of the array of variates. If the given size is,
+ e.g., ``(k, m)``, then ``k * m`` variates are drawn, where one
+ variate is a vector of length ``len(colors)``, and the return value
+ has shape ``(k, m, len(colors))``. If `size` is an integer, the
+ output has shape ``(size, len(colors))``. Default is None, in
+ which case a single variate is returned as an array with shape
+ ``(len(colors),)``.
+ method : string, optional
+ Specify the algorithm that is used to generate the variates.
+ Must be 'count' or 'marginals' (the default). See the Notes
+ for a description of the methods.
+
+ Returns
+ -------
+ variates : ndarray
+ Array of variates drawn from the multivariate hypergeometric
+ distribution.
+
+ See Also
+ --------
+ hypergeometric : Draw samples from the (univariate) hypergeometric
+ distribution.
+
+ Notes
+ -----
+ The two methods do not return the same sequence of variates.
+
+ The "count" algorithm is roughly equivalent to the following numpy
+ code::
+
+ choices = np.repeat(np.arange(len(colors)), colors)
+ selection = np.random.choice(choices, nsample, replace=False)
+ variate = np.bincount(selection, minlength=len(colors))
+
+ The "count" algorithm uses a temporary array of integers with length
+ ``sum(colors)``.
+
+ The "marginals" algorithm generates a variate by using repeated
+ calls to the univariate hypergeometric sampler. It is roughly
+ equivalent to::
+
+ variate = np.zeros(len(colors), dtype=np.int64)
+ # `remaining` is the cumulative sum of `colors` from the last
+ # element to the first; e.g. if `colors` is [3, 1, 5], then
+ # `remaining` is [9, 6, 5].
+ remaining = np.cumsum(colors[::-1])[::-1]
+ for i in range(len(colors)-1):
+ if nsample < 1:
+ break
+ variate[i] = hypergeometric(colors[i], remaining[i+1],
+ nsample)
+ nsample -= variate[i]
+ variate[-1] = nsample
+
+ The default method is "marginals". For some cases (e.g. when
+ `colors` contains relatively small integers), the "count" method
+ can be significantly faster than the "marginals" method. If
+ performance of the algorithm is important, test the two methods
+ with typical inputs to decide which works best.
+
+ .. versionadded:: 1.18.0
+
+ Examples
+ --------
+ >>> colors = [16, 8, 4]
+ >>> seed = 4861946401452
+ >>> gen = np.random.Generator(np.random.PCG64(seed))
+ >>> gen.multivariate_hypergeometric(colors, 6)
+ array([5, 0, 1])
+ >>> gen.multivariate_hypergeometric(colors, 6, size=3)
+ array([[5, 0, 1],
+ [2, 2, 2],
+ [3, 3, 0]])
+ >>> gen.multivariate_hypergeometric(colors, 6, size=(2, 2))
+ array([[[3, 2, 1],
+ [3, 2, 1]],
+ [[4, 1, 1],
+ [3, 2, 1]]])
+ """
+ cdef int64_t nsamp
+ cdef size_t num_colors
+ cdef int64_t total
+ cdef int64_t *colors_ptr
+ cdef int64_t max_index
+ cdef size_t num_variates
+ cdef int64_t *variates_ptr
+ cdef int result
+
+ if method not in ['count', 'marginals']:
+ raise ValueError('method must be "count" or "marginals".')
+
+ try:
+ operator.index(nsample)
+ except TypeError:
+ raise ValueError('nsample must be an integer')
+
+ if nsample < 0:
+ raise ValueError("nsample must be nonnegative.")
+ if nsample > INT64_MAX:
+ raise ValueError("nsample must not exceed %d" % INT64_MAX)
+ nsamp = nsample
+
+ # Validation of colors, a 1-d sequence of nonnegative integers.
+ invalid_colors = False
+ try:
+ colors = np.asarray(colors)
+ if colors.ndim != 1:
+ invalid_colors = True
+ elif colors.size > 0 and not np.issubdtype(colors.dtype,
+ np.integer):
+ invalid_colors = True
+ elif np.any((colors < 0) | (colors > INT64_MAX)):
+ invalid_colors = True
+ except ValueError:
+ invalid_colors = True
+ if invalid_colors:
+ raise ValueError('colors must be a one-dimensional sequence '
+ 'of nonnegative integers not exceeding %d.' %
+ INT64_MAX)
+
+ colors = np.ascontiguousarray(colors, dtype=np.int64)
+ num_colors = colors.size
+
+ colors_ptr = <int64_t *> np.PyArray_DATA(colors)
+
+ total = _safe_sum_nonneg_int64(num_colors, colors_ptr)
+ if total == -1:
+ raise ValueError("sum(colors) must not exceed the maximum value "
+ "of a 64 bit signed integer (%d)" % INT64_MAX)
+
+ if method == 'marginals' and total >= 1000000000:
+ raise ValueError('When method is "marginals", sum(colors) must '
+ 'be less than 1000000000.')
+
+ # The C code that implements the 'count' method will malloc an
+ # array of size total*sizeof(size_t). Here we ensure that that
+ # product does not overflow.
+ if SIZE_MAX > <uint64_t>INT64_MAX:
+ max_index = INT64_MAX // sizeof(size_t)
+ else:
+ max_index = SIZE_MAX // sizeof(size_t)
+ if method == 'count' and total > max_index:
+ raise ValueError("When method is 'count', sum(colors) must not "
+ "exceed %d" % max_index)
+ if nsamp > total:
+ raise ValueError("nsample > sum(colors)")
+
+ # Figure out the shape of the return array.
+ if size is None:
+ shape = (num_colors,)
+ elif np.isscalar(size):
+ shape = (size, num_colors)
+ else:
+ shape = tuple(size) + (num_colors,)
+ variates = np.zeros(shape, dtype=np.int64)
+
+ if num_colors == 0:
+ return variates
+
+ # One variate is a vector of length num_colors.
+ num_variates = variates.size // num_colors
+ variates_ptr = <int64_t *> np.PyArray_DATA(variates)
+
+ if method == 'count':
+ with self.lock, nogil:
+ result = random_multivariate_hypergeometric_count(&self._bitgen,
+ total, num_colors, colors_ptr, nsamp,
+ num_variates, variates_ptr)
+ if result == -1:
+ raise MemoryError("Insufficient memory for multivariate_"
+ "hypergeometric with method='count' and "
+ "sum(colors)=%d" % total)
+ else:
+ with self.lock, nogil:
+ random_multivariate_hypergeometric_marginals(&self._bitgen,
+ total, num_colors, colors_ptr, nsamp,
+ num_variates, variates_ptr)
+ return variates
+
def dirichlet(self, object alpha, size=None):
"""
dirichlet(alpha, size=None)
@@ -3773,7 +4037,7 @@ cdef class Generator:
while i < totsize:
acc = 0.0
for j in range(k):
- val_data[i+j] = random_standard_gamma_zig(&self._bitgen,
+ val_data[i+j] = random_standard_gamma(&self._bitgen,
alpha_data[j])
acc = acc + val_data[i + j]
invacc = 1/acc
@@ -3964,7 +4228,7 @@ cdef class Generator:
>>> rng.permutation("abc")
Traceback (most recent call last):
...
- numpy.AxisError: x must be an integer or at least 1-dimensional
+ numpy.AxisError: axis 0 is out of bounds for array of dimension 0
>>> arr = np.arange(9).reshape((3, 3))
>>> rng.permutation(arr, axis=1)
@@ -4003,21 +4267,24 @@ def default_rng(seed=None):
Parameters
----------
- seed : {None, int, array_like[ints], ISeedSequence, BitGenerator, Generator}, optional
+ seed : {None, int, array_like[ints], SeedSequence, BitGenerator, Generator}, optional
A seed to initialize the `BitGenerator`. If None, then fresh,
unpredictable entropy will be pulled from the OS. If an ``int`` or
``array_like[ints]`` is passed, then it will be passed to
`SeedSequence` to derive the initial `BitGenerator` state. One may also
- pass in an implementor of the `ISeedSequence` interface like
- `SeedSequence`.
+        pass in a `SeedSequence` instance.
Additionally, when passed a `BitGenerator`, it will be wrapped by
`Generator`. If passed a `Generator`, it will be returned unaltered.
+ Returns
+ -------
+ Generator
+ The initialized generator object.
+
Notes
-----
- When `seed` is omitted or ``None``, a new `BitGenerator` and `Generator` will
- be instantiated each time. This function does not manage a default global
- instance.
+ If ``seed`` is not a `BitGenerator` or a `Generator`, a new `BitGenerator`
+ is instantiated. This function does not manage a default global instance.
"""
if _check_bit_generator(seed):
# We were passed a BitGenerator, so just wrap it up.
diff --git a/numpy/random/mt19937.pyx b/numpy/random/_mt19937.pyx
index 7d0f6cd22..919a96a4c 100644
--- a/numpy/random/mt19937.pyx
+++ b/numpy/random/_mt19937.pyx
@@ -3,8 +3,8 @@ import operator
import numpy as np
cimport numpy as np
-from .common cimport *
-from .bit_generator cimport BitGenerator, SeedSequence
+from libc.stdint cimport uint32_t, uint64_t
+from numpy.random cimport BitGenerator, SeedSequence
__all__ = ['MT19937']
@@ -48,13 +48,12 @@ cdef class MT19937(BitGenerator):
Parameters
----------
- seed : {None, int, array_like[ints], ISeedSequence}, optional
+ seed : {None, int, array_like[ints], SeedSequence}, optional
A seed to initialize the `BitGenerator`. If None, then fresh,
unpredictable entropy will be pulled from the OS. If an ``int`` or
``array_like[ints]`` is passed, then it will be passed to
`SeedSequence` to derive the initial `BitGenerator` state. One may also
- pass in an implementor of the `ISeedSequence` interface like
- `SeedSequence`.
+ pass in a `SeedSequence` instance.
Attributes
----------
diff --git a/numpy/random/pcg64.pyx b/numpy/random/_pcg64.pyx
index 585520139..05d27db5c 100644
--- a/numpy/random/pcg64.pyx
+++ b/numpy/random/_pcg64.pyx
@@ -1,8 +1,9 @@
import numpy as np
cimport numpy as np
-from .common cimport *
-from .bit_generator cimport BitGenerator
+from libc.stdint cimport uint32_t, uint64_t
+from ._common cimport uint64_to_double, wrap_int
+from numpy.random cimport BitGenerator
__all__ = ['PCG64']
@@ -43,13 +44,12 @@ cdef class PCG64(BitGenerator):
Parameters
----------
- seed : {None, int, array_like[ints], ISeedSequence}, optional
+ seed : {None, int, array_like[ints], SeedSequence}, optional
A seed to initialize the `BitGenerator`. If None, then fresh,
unpredictable entropy will be pulled from the OS. If an ``int`` or
``array_like[ints]`` is passed, then it will be passed to
`SeedSequence` to derive the initial `BitGenerator` state. One may also
- pass in an implementor of the `ISeedSequence` interface like
- `SeedSequence`.
+ pass in a `SeedSequence` instance.
Notes
-----
diff --git a/numpy/random/philox.pyx b/numpy/random/_philox.pyx
index 8b7683017..7e8880490 100644
--- a/numpy/random/philox.pyx
+++ b/numpy/random/_philox.pyx
@@ -6,9 +6,11 @@ except ImportError:
from dummy_threading import Lock
import numpy as np
+cimport numpy as np
-from .common cimport *
-from .bit_generator cimport BitGenerator
+from libc.stdint cimport uint32_t, uint64_t
+from ._common cimport uint64_to_double, int_to_array, wrap_int
+from numpy.random cimport BitGenerator
__all__ = ['Philox']
@@ -62,21 +64,20 @@ cdef class Philox(BitGenerator):
Parameters
----------
- seed : {None, int, array_like[ints], ISeedSequence}, optional
+ seed : {None, int, array_like[ints], SeedSequence}, optional
A seed to initialize the `BitGenerator`. If None, then fresh,
unpredictable entropy will be pulled from the OS. If an ``int`` or
``array_like[ints]`` is passed, then it will be passed to
`SeedSequence` to derive the initial `BitGenerator` state. One may also
- pass in an implementor of the `ISeedSequence` interface like
- `SeedSequence`.
+ pass in a `SeedSequence` instance.
counter : {None, int, array_like}, optional
Counter to use in the Philox state. Can be either
a Python int (long in 2.x) in [0, 2**256) or a 4-element uint64 array.
If not provided, the RNG is initialized at 0.
key : {None, int, array_like}, optional
- Key to use in the Philox state. Unlike seed, the value in key is
+ Key to use in the Philox state. Unlike ``seed``, the value in key is
directly set. Can be either a Python int in [0, 2**128) or a 2-element
- uint64 array. `key` and `seed` cannot both be used.
+ uint64 array. `key` and ``seed`` cannot both be used.
Attributes
----------
@@ -108,10 +109,10 @@ cdef class Philox(BitGenerator):
randoms produced. The second is a key which determined the sequence
produced. Using different keys produces independent sequences.
- The input seed is processed by `SeedSequence` to generate the key. The
+ The input ``seed`` is processed by `SeedSequence` to generate the key. The
counter is set to 0.
- Alternately, one can omit the seed parameter and set the ``key`` and
+ Alternately, one can omit the ``seed`` parameter and set the ``key`` and
``counter`` directly.
**Parallel Features**
@@ -146,7 +147,7 @@ cdef class Philox(BitGenerator):
**Compatibility Guarantee**
- ``Philox`` makes a guarantee that a fixed seed will always produce
+ ``Philox`` makes a guarantee that a fixed ``seed`` will always produce
the same random integer stream.
Examples
diff --git a/numpy/random/_pickle.py b/numpy/random/_pickle.py
index 3b58f21e8..29ff69644 100644
--- a/numpy/random/_pickle.py
+++ b/numpy/random/_pickle.py
@@ -1,10 +1,10 @@
from .mtrand import RandomState
-from .philox import Philox
-from .pcg64 import PCG64
-from .sfc64 import SFC64
+from ._philox import Philox
+from ._pcg64 import PCG64
+from ._sfc64 import SFC64
-from .generator import Generator
-from .mt19937 import MT19937
+from ._generator import Generator
+from ._mt19937 import MT19937
BitGenerators = {'MT19937': MT19937,
'PCG64': PCG64,
diff --git a/numpy/random/sfc64.pyx b/numpy/random/_sfc64.pyx
index a881096e9..1daee34f8 100644
--- a/numpy/random/sfc64.pyx
+++ b/numpy/random/_sfc64.pyx
@@ -1,8 +1,9 @@
import numpy as np
cimport numpy as np
-from .common cimport *
-from .bit_generator cimport BitGenerator
+from libc.stdint cimport uint32_t, uint64_t
+from ._common cimport uint64_to_double
+from numpy.random cimport BitGenerator
__all__ = ['SFC64']
@@ -38,13 +39,12 @@ cdef class SFC64(BitGenerator):
Parameters
----------
- seed : {None, int, array_like[ints], ISeedSequence}, optional
+ seed : {None, int, array_like[ints], SeedSequence}, optional
A seed to initialize the `BitGenerator`. If None, then fresh,
unpredictable entropy will be pulled from the OS. If an ``int`` or
``array_like[ints]`` is passed, then it will be passed to
`SeedSequence` to derive the initial `BitGenerator` state. One may also
- pass in an implementor of the `ISeedSequence` interface like
- `SeedSequence`.
+ pass in a `SeedSequence` instance.
Notes
-----
diff --git a/numpy/random/bit_generator.pxd b/numpy/random/bit_generator.pxd
index 984033f17..bd5e47a20 100644
--- a/numpy/random/bit_generator.pxd
+++ b/numpy/random/bit_generator.pxd
@@ -1,6 +1,15 @@
-
-from .common cimport bitgen_t, uint32_t
cimport numpy as np
+from libc.stdint cimport uint32_t, uint64_t
+
+cdef extern from "numpy/random/bitgen.h":
+ struct bitgen:
+ void *state
+ uint64_t (*next_uint64)(void *st) nogil
+ uint32_t (*next_uint32)(void *st) nogil
+ double (*next_double)(void *st) nogil
+ uint64_t (*next_raw)(void *st) nogil
+
+ ctypedef bitgen bitgen_t
cdef class BitGenerator():
cdef readonly object _seed_seq
diff --git a/numpy/random/bit_generator.pyx b/numpy/random/bit_generator.pyx
index eb608af6c..f145ec13d 100644
--- a/numpy/random/bit_generator.pyx
+++ b/numpy/random/bit_generator.pyx
@@ -53,9 +53,7 @@ from cpython.pycapsule cimport PyCapsule_New
import numpy as np
cimport numpy as np
-from libc.stdint cimport uint32_t
-from .common cimport (random_raw, benchmark, prepare_ctypes, prepare_cffi)
-from .distributions cimport bitgen_t
+from ._common cimport (random_raw, benchmark, prepare_ctypes, prepare_cffi)
__all__ = ['SeedSequence', 'BitGenerator']
@@ -484,13 +482,12 @@ cdef class BitGenerator():
Parameters
----------
- seed : {None, int, array_like[ints], ISeedSequence}, optional
+ seed : {None, int, array_like[ints], SeedSequence}, optional
A seed to initialize the `BitGenerator`. If None, then fresh,
unpredictable entropy will be pulled from the OS. If an ``int`` or
``array_like[ints]`` is passed, then it will be passed to
- `SeedSequence` to derive the initial `BitGenerator` state. One may also
- pass in an implementor of the `ISeedSequence` interface like
- `SeedSequence`.
+        `~numpy.random.SeedSequence` to derive the initial `BitGenerator` state.
+ One may also pass in a `SeedSequence` instance.
Attributes
----------
diff --git a/numpy/random/c_distributions.pxd b/numpy/random/c_distributions.pxd
new file mode 100644
index 000000000..6f905edc1
--- /dev/null
+++ b/numpy/random/c_distributions.pxd
@@ -0,0 +1,114 @@
+#!python
+#cython: wraparound=False, nonecheck=False, boundscheck=False, cdivision=True, language_level=3
+from numpy cimport npy_intp
+
+from libc.stdint cimport (uint64_t, int32_t, int64_t)
+from numpy.random cimport bitgen_t
+
+cdef extern from "numpy/random/distributions.h":
+
+ struct s_binomial_t:
+ int has_binomial
+ double psave
+ int64_t nsave
+ double r
+ double q
+ double fm
+ int64_t m
+ double p1
+ double xm
+ double xl
+ double xr
+ double c
+ double laml
+ double lamr
+ double p2
+ double p3
+ double p4
+
+ ctypedef s_binomial_t binomial_t
+
+ double random_standard_uniform(bitgen_t *bitgen_state) nogil
+ void random_standard_uniform_fill(bitgen_t* bitgen_state, npy_intp cnt, double *out) nogil
+ double random_standard_exponential(bitgen_t *bitgen_state) nogil
+ double random_standard_exponential_f(bitgen_t *bitgen_state) nogil
+ void random_standard_exponential_fill(bitgen_t *bitgen_state, npy_intp cnt, double *out) nogil
+ void random_standard_exponential_fill_f(bitgen_t *bitgen_state, npy_intp cnt, double *out) nogil
+ void random_standard_exponential_inv_fill(bitgen_t *bitgen_state, npy_intp cnt, double *out) nogil
+ void random_standard_exponential_inv_fill_f(bitgen_t *bitgen_state, npy_intp cnt, double *out) nogil
+ double random_standard_normal(bitgen_t* bitgen_state) nogil
+ void random_standard_normal_fill(bitgen_t *bitgen_state, npy_intp count, double *out) nogil
+ void random_standard_normal_fill_f(bitgen_t *bitgen_state, npy_intp count, float *out) nogil
+ double random_standard_gamma(bitgen_t *bitgen_state, double shape) nogil
+
+ float random_standard_uniform_f(bitgen_t *bitgen_state) nogil
+ void random_standard_uniform_fill_f(bitgen_t* bitgen_state, npy_intp cnt, float *out) nogil
+ float random_standard_normal_f(bitgen_t* bitgen_state) nogil
+ float random_standard_gamma_f(bitgen_t *bitgen_state, float shape) nogil
+
+ int64_t random_positive_int64(bitgen_t *bitgen_state) nogil
+ int32_t random_positive_int32(bitgen_t *bitgen_state) nogil
+ int64_t random_positive_int(bitgen_t *bitgen_state) nogil
+ uint64_t random_uint(bitgen_t *bitgen_state) nogil
+
+ double random_normal(bitgen_t *bitgen_state, double loc, double scale) nogil
+
+ double random_gamma(bitgen_t *bitgen_state, double shape, double scale) nogil
+ float random_gamma_f(bitgen_t *bitgen_state, float shape, float scale) nogil
+
+ double random_exponential(bitgen_t *bitgen_state, double scale) nogil
+ double random_uniform(bitgen_t *bitgen_state, double lower, double range) nogil
+ double random_beta(bitgen_t *bitgen_state, double a, double b) nogil
+ double random_chisquare(bitgen_t *bitgen_state, double df) nogil
+ double random_f(bitgen_t *bitgen_state, double dfnum, double dfden) nogil
+ double random_standard_cauchy(bitgen_t *bitgen_state) nogil
+ double random_pareto(bitgen_t *bitgen_state, double a) nogil
+ double random_weibull(bitgen_t *bitgen_state, double a) nogil
+ double random_power(bitgen_t *bitgen_state, double a) nogil
+ double random_laplace(bitgen_t *bitgen_state, double loc, double scale) nogil
+ double random_gumbel(bitgen_t *bitgen_state, double loc, double scale) nogil
+ double random_logistic(bitgen_t *bitgen_state, double loc, double scale) nogil
+ double random_lognormal(bitgen_t *bitgen_state, double mean, double sigma) nogil
+ double random_rayleigh(bitgen_t *bitgen_state, double mode) nogil
+ double random_standard_t(bitgen_t *bitgen_state, double df) nogil
+ double random_noncentral_chisquare(bitgen_t *bitgen_state, double df,
+ double nonc) nogil
+ double random_noncentral_f(bitgen_t *bitgen_state, double dfnum,
+ double dfden, double nonc) nogil
+ double random_wald(bitgen_t *bitgen_state, double mean, double scale) nogil
+ double random_vonmises(bitgen_t *bitgen_state, double mu, double kappa) nogil
+ double random_triangular(bitgen_t *bitgen_state, double left, double mode,
+ double right) nogil
+
+ int64_t random_poisson(bitgen_t *bitgen_state, double lam) nogil
+ int64_t random_negative_binomial(bitgen_t *bitgen_state, double n, double p) nogil
+ int64_t random_binomial(bitgen_t *bitgen_state, double p, int64_t n, binomial_t *binomial) nogil
+ int64_t random_logseries(bitgen_t *bitgen_state, double p) nogil
+ int64_t random_geometric_search(bitgen_t *bitgen_state, double p) nogil
+ int64_t random_geometric_inversion(bitgen_t *bitgen_state, double p) nogil
+ int64_t random_geometric(bitgen_t *bitgen_state, double p) nogil
+ int64_t random_zipf(bitgen_t *bitgen_state, double a) nogil
+ int64_t random_hypergeometric(bitgen_t *bitgen_state, int64_t good, int64_t bad,
+ int64_t sample) nogil
+
+ uint64_t random_interval(bitgen_t *bitgen_state, uint64_t max) nogil
+
+ # Generate random uint64 numbers in closed interval [off, off + rng].
+ uint64_t random_bounded_uint64(bitgen_t *bitgen_state,
+ uint64_t off, uint64_t rng,
+ uint64_t mask, bint use_masked) nogil
+
+ void random_multinomial(bitgen_t *bitgen_state, int64_t n, int64_t *mnix,
+ double *pix, npy_intp d, binomial_t *binomial) nogil
+
+ int random_multivariate_hypergeometric_count(bitgen_t *bitgen_state,
+ int64_t total,
+ size_t num_colors, int64_t *colors,
+ int64_t nsample,
+ size_t num_variates, int64_t *variates) nogil
+ void random_multivariate_hypergeometric_marginals(bitgen_t *bitgen_state,
+ int64_t total,
+ size_t num_colors, int64_t *colors,
+ int64_t nsample,
+ size_t num_variates, int64_t *variates) nogil
+
diff --git a/numpy/random/distributions.pxd b/numpy/random/distributions.pxd
deleted file mode 100644
index 75edaee9d..000000000
--- a/numpy/random/distributions.pxd
+++ /dev/null
@@ -1,140 +0,0 @@
-#cython: language_level=3
-
-from .common cimport (uint8_t, uint16_t, uint32_t, uint64_t,
- int32_t, int64_t, bitgen_t)
-import numpy as np
-cimport numpy as np
-
-cdef extern from "src/distributions/distributions.h":
-
- struct s_binomial_t:
- int has_binomial
- double psave
- int64_t nsave
- double r
- double q
- double fm
- int64_t m
- double p1
- double xm
- double xl
- double xr
- double c
- double laml
- double lamr
- double p2
- double p3
- double p4
-
- ctypedef s_binomial_t binomial_t
-
- double random_double(bitgen_t *bitgen_state) nogil
- void random_double_fill(bitgen_t* bitgen_state, np.npy_intp cnt, double *out) nogil
- double random_standard_exponential(bitgen_t *bitgen_state) nogil
- void random_standard_exponential_fill(bitgen_t *bitgen_state, np.npy_intp cnt, double *out) nogil
- double random_standard_exponential_zig(bitgen_t *bitgen_state) nogil
- void random_standard_exponential_zig_fill(bitgen_t *bitgen_state, np.npy_intp cnt, double *out) nogil
- double random_gauss_zig(bitgen_t* bitgen_state) nogil
- void random_gauss_zig_fill(bitgen_t *bitgen_state, np.npy_intp count, double *out) nogil
- double random_standard_gamma_zig(bitgen_t *bitgen_state, double shape) nogil
-
- float random_float(bitgen_t *bitgen_state) nogil
- float random_standard_exponential_f(bitgen_t *bitgen_state) nogil
- float random_standard_exponential_zig_f(bitgen_t *bitgen_state) nogil
- float random_gauss_zig_f(bitgen_t* bitgen_state) nogil
- float random_standard_gamma_f(bitgen_t *bitgen_state, float shape) nogil
- float random_standard_gamma_zig_f(bitgen_t *bitgen_state, float shape) nogil
-
- int64_t random_positive_int64(bitgen_t *bitgen_state) nogil
- int32_t random_positive_int32(bitgen_t *bitgen_state) nogil
- int64_t random_positive_int(bitgen_t *bitgen_state) nogil
- uint64_t random_uint(bitgen_t *bitgen_state) nogil
-
- double random_normal_zig(bitgen_t *bitgen_state, double loc, double scale) nogil
-
- double random_gamma(bitgen_t *bitgen_state, double shape, double scale) nogil
- float random_gamma_float(bitgen_t *bitgen_state, float shape, float scale) nogil
-
- double random_exponential(bitgen_t *bitgen_state, double scale) nogil
- double random_uniform(bitgen_t *bitgen_state, double lower, double range) nogil
- double random_beta(bitgen_t *bitgen_state, double a, double b) nogil
- double random_chisquare(bitgen_t *bitgen_state, double df) nogil
- double random_f(bitgen_t *bitgen_state, double dfnum, double dfden) nogil
- double random_standard_cauchy(bitgen_t *bitgen_state) nogil
- double random_pareto(bitgen_t *bitgen_state, double a) nogil
- double random_weibull(bitgen_t *bitgen_state, double a) nogil
- double random_power(bitgen_t *bitgen_state, double a) nogil
- double random_laplace(bitgen_t *bitgen_state, double loc, double scale) nogil
- double random_gumbel(bitgen_t *bitgen_state, double loc, double scale) nogil
- double random_logistic(bitgen_t *bitgen_state, double loc, double scale) nogil
- double random_lognormal(bitgen_t *bitgen_state, double mean, double sigma) nogil
- double random_rayleigh(bitgen_t *bitgen_state, double mode) nogil
- double random_standard_t(bitgen_t *bitgen_state, double df) nogil
- double random_noncentral_chisquare(bitgen_t *bitgen_state, double df,
- double nonc) nogil
- double random_noncentral_f(bitgen_t *bitgen_state, double dfnum,
- double dfden, double nonc) nogil
- double random_wald(bitgen_t *bitgen_state, double mean, double scale) nogil
- double random_vonmises(bitgen_t *bitgen_state, double mu, double kappa) nogil
- double random_triangular(bitgen_t *bitgen_state, double left, double mode,
- double right) nogil
-
- int64_t random_poisson(bitgen_t *bitgen_state, double lam) nogil
- int64_t random_negative_binomial(bitgen_t *bitgen_state, double n, double p) nogil
- int64_t random_binomial(bitgen_t *bitgen_state, double p, int64_t n, binomial_t *binomial) nogil
- int64_t random_logseries(bitgen_t *bitgen_state, double p) nogil
- int64_t random_geometric_search(bitgen_t *bitgen_state, double p) nogil
- int64_t random_geometric_inversion(bitgen_t *bitgen_state, double p) nogil
- int64_t random_geometric(bitgen_t *bitgen_state, double p) nogil
- int64_t random_zipf(bitgen_t *bitgen_state, double a) nogil
- int64_t random_hypergeometric(bitgen_t *bitgen_state, int64_t good, int64_t bad,
- int64_t sample) nogil
-
- uint64_t random_interval(bitgen_t *bitgen_state, uint64_t max) nogil
-
- # Generate random uint64 numbers in closed interval [off, off + rng].
- uint64_t random_bounded_uint64(bitgen_t *bitgen_state,
- uint64_t off, uint64_t rng,
- uint64_t mask, bint use_masked) nogil
-
- # Generate random uint32 numbers in closed interval [off, off + rng].
- uint32_t random_buffered_bounded_uint32(bitgen_t *bitgen_state,
- uint32_t off, uint32_t rng,
- uint32_t mask, bint use_masked,
- int *bcnt, uint32_t *buf) nogil
- uint16_t random_buffered_bounded_uint16(bitgen_t *bitgen_state,
- uint16_t off, uint16_t rng,
- uint16_t mask, bint use_masked,
- int *bcnt, uint32_t *buf) nogil
- uint8_t random_buffered_bounded_uint8(bitgen_t *bitgen_state,
- uint8_t off, uint8_t rng,
- uint8_t mask, bint use_masked,
- int *bcnt, uint32_t *buf) nogil
- np.npy_bool random_buffered_bounded_bool(bitgen_t *bitgen_state,
- np.npy_bool off, np.npy_bool rng,
- np.npy_bool mask, bint use_masked,
- int *bcnt, uint32_t *buf) nogil
-
- void random_bounded_uint64_fill(bitgen_t *bitgen_state,
- uint64_t off, uint64_t rng, np.npy_intp cnt,
- bint use_masked,
- uint64_t *out) nogil
- void random_bounded_uint32_fill(bitgen_t *bitgen_state,
- uint32_t off, uint32_t rng, np.npy_intp cnt,
- bint use_masked,
- uint32_t *out) nogil
- void random_bounded_uint16_fill(bitgen_t *bitgen_state,
- uint16_t off, uint16_t rng, np.npy_intp cnt,
- bint use_masked,
- uint16_t *out) nogil
- void random_bounded_uint8_fill(bitgen_t *bitgen_state,
- uint8_t off, uint8_t rng, np.npy_intp cnt,
- bint use_masked,
- uint8_t *out) nogil
- void random_bounded_bool_fill(bitgen_t *bitgen_state,
- np.npy_bool off, np.npy_bool rng, np.npy_intp cnt,
- bint use_masked,
- np.npy_bool *out) nogil
-
- void random_multinomial(bitgen_t *bitgen_state, int64_t n, int64_t *mnix,
- double *pix, np.npy_intp d, binomial_t *binomial) nogil
diff --git a/numpy/random/examples/cython/extending_distributions.pyx b/numpy/random/examples/cython/extending_distributions.pyx
deleted file mode 100644
index 3cefec97e..000000000
--- a/numpy/random/examples/cython/extending_distributions.pyx
+++ /dev/null
@@ -1,59 +0,0 @@
-#!/usr/bin/env python
-#cython: language_level=3
-"""
-This file shows how the distributions that are accessed through
-distributions.pxd can be used Cython code.
-"""
-import numpy as np
-cimport numpy as np
-cimport cython
-from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer
-from numpy.random.common cimport *
-from numpy.random.distributions cimport random_gauss_zig
-from numpy.random import PCG64
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def normals_zig(Py_ssize_t n):
- cdef Py_ssize_t i
- cdef bitgen_t *rng
- cdef const char *capsule_name = "BitGenerator"
- cdef double[::1] random_values
-
- x = PCG64()
- capsule = x.capsule
- if not PyCapsule_IsValid(capsule, capsule_name):
- raise ValueError("Invalid pointer to anon_func_state")
- rng = <bitgen_t *> PyCapsule_GetPointer(capsule, capsule_name)
- random_values = np.empty(n)
- # Best practice is to release GIL and acquire the lock
- with x.lock, nogil:
- for i in range(n):
- random_values[i] = random_gauss_zig(rng)
- randoms = np.asarray(random_values)
- return randoms
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def uniforms(Py_ssize_t n):
- cdef Py_ssize_t i
- cdef bitgen_t *rng
- cdef const char *capsule_name = "BitGenerator"
- cdef double[::1] random_values
-
- x = PCG64()
- capsule = x.capsule
- # Optional check that the capsule if from a BitGenerator
- if not PyCapsule_IsValid(capsule, capsule_name):
- raise ValueError("Invalid pointer to anon_func_state")
- # Cast the pointer
- rng = <bitgen_t *> PyCapsule_GetPointer(capsule, capsule_name)
- random_values = np.empty(n)
- with x.lock, nogil:
- for i in range(n):
- # Call the function
- random_values[i] = rng.next_double(rng.state)
- randoms = np.asarray(random_values)
- return randoms
diff --git a/numpy/random/examples/cython/setup.py b/numpy/random/examples/cython/setup.py
deleted file mode 100644
index 69f057ed5..000000000
--- a/numpy/random/examples/cython/setup.py
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/usr/bin/env python3
-"""
-Build the demos
-
-Usage: python setup.py build_ext -i
-"""
-
-import numpy as np
-from distutils.core import setup
-from Cython.Build import cythonize
-from setuptools.extension import Extension
-from os.path import join
-
-extending = Extension("extending",
- sources=['extending.pyx'],
- include_dirs=[np.get_include()])
-distributions = Extension("extending_distributions",
- sources=['extending_distributions.pyx',
- join('..', '..', 'src',
- 'distributions', 'distributions.c')],
- include_dirs=[np.get_include()])
-
-extensions = [extending, distributions]
-
-setup(
- ext_modules=cythonize(extensions)
-)
diff --git a/numpy/random/examples/numba/extending.py b/numpy/random/examples/numba/extending.py
deleted file mode 100644
index d41c2d76f..000000000
--- a/numpy/random/examples/numba/extending.py
+++ /dev/null
@@ -1,77 +0,0 @@
-import datetime as dt
-
-import numpy as np
-import numba as nb
-
-from numpy.random import PCG64
-
-x = PCG64()
-f = x.ctypes.next_uint32
-s = x.ctypes.state
-
-
-@nb.jit(nopython=True)
-def bounded_uint(lb, ub, state):
- mask = delta = ub - lb
- mask |= mask >> 1
- mask |= mask >> 2
- mask |= mask >> 4
- mask |= mask >> 8
- mask |= mask >> 16
-
- val = f(state) & mask
- while val > delta:
- val = f(state) & mask
-
- return lb + val
-
-
-print(bounded_uint(323, 2394691, s.value))
-
-
-@nb.jit(nopython=True)
-def bounded_uints(lb, ub, n, state):
- out = np.empty(n, dtype=np.uint32)
- for i in range(n):
- out[i] = bounded_uint(lb, ub, state)
-
-
-bounded_uints(323, 2394691, 10000000, s.value)
-
-g = x.cffi.next_double
-cffi_state = x.cffi.state
-state_addr = x.cffi.state_address
-
-
-def normals(n, state):
- out = np.empty(n)
- for i in range((n + 1) // 2):
- x1 = 2.0 * g(state) - 1.0
- x2 = 2.0 * g(state) - 1.0
- r2 = x1 * x1 + x2 * x2
- while r2 >= 1.0 or r2 == 0.0:
- x1 = 2.0 * g(state) - 1.0
- x2 = 2.0 * g(state) - 1.0
- r2 = x1 * x1 + x2 * x2
- f = np.sqrt(-2.0 * np.log(r2) / r2)
- out[2 * i] = f * x1
- if 2 * i + 1 < n:
- out[2 * i + 1] = f * x2
- return out
-
-
-print(normals(10, cffi_state).var())
-# Warm up
-normalsj = nb.jit(normals, nopython=True)
-normalsj(1, state_addr)
-
-start = dt.datetime.now()
-normalsj(1000000, state_addr)
-ms = 1000 * (dt.datetime.now() - start).total_seconds()
-print('1,000,000 Polar-transform (numba/PCG64) randoms in '
- '{ms:0.1f}ms'.format(ms=ms))
-
-start = dt.datetime.now()
-np.random.standard_normal(1000000)
-ms = 1000 * (dt.datetime.now() - start).total_seconds()
-print('1,000,000 Polar-transform (NumPy) randoms in {ms:0.1f}ms'.format(ms=ms))
diff --git a/numpy/random/src/aligned_malloc/aligned_malloc.h b/numpy/random/include/aligned_malloc.h
index ea24f6d23..ea24f6d23 100644
--- a/numpy/random/src/aligned_malloc/aligned_malloc.h
+++ b/numpy/random/include/aligned_malloc.h
diff --git a/numpy/random/src/legacy/legacy-distributions.h b/numpy/random/include/legacy-distributions.h
index 4bc15d58e..b8ba0841c 100644
--- a/numpy/random/src/legacy/legacy-distributions.h
+++ b/numpy/random/include/legacy-distributions.h
@@ -2,7 +2,7 @@
#define _RANDOMDGEN__DISTRIBUTIONS_LEGACY_H_
-#include "../distributions/distributions.h"
+#include "numpy/random/distributions.h"
typedef struct aug_bitgen {
bitgen_t *bit_generator;
diff --git a/numpy/random/legacy_distributions.pxd b/numpy/random/legacy_distributions.pxd
deleted file mode 100644
index c681388db..000000000
--- a/numpy/random/legacy_distributions.pxd
+++ /dev/null
@@ -1,50 +0,0 @@
-#cython: language_level=3
-
-from libc.stdint cimport int64_t
-
-import numpy as np
-cimport numpy as np
-
-from .distributions cimport bitgen_t, binomial_t
-
-cdef extern from "legacy-distributions.h":
-
- struct aug_bitgen:
- bitgen_t *bit_generator
- int has_gauss
- double gauss
-
- ctypedef aug_bitgen aug_bitgen_t
-
- double legacy_gauss(aug_bitgen_t *aug_state) nogil
- double legacy_pareto(aug_bitgen_t *aug_state, double a) nogil
- double legacy_weibull(aug_bitgen_t *aug_state, double a) nogil
- double legacy_standard_gamma(aug_bitgen_t *aug_state, double shape) nogil
- double legacy_normal(aug_bitgen_t *aug_state, double loc, double scale) nogil
- double legacy_standard_t(aug_bitgen_t *aug_state, double df) nogil
-
- double legacy_standard_exponential(aug_bitgen_t *aug_state) nogil
- double legacy_power(aug_bitgen_t *aug_state, double a) nogil
- double legacy_gamma(aug_bitgen_t *aug_state, double shape, double scale) nogil
- double legacy_power(aug_bitgen_t *aug_state, double a) nogil
- double legacy_chisquare(aug_bitgen_t *aug_state, double df) nogil
- double legacy_noncentral_chisquare(aug_bitgen_t *aug_state, double df,
- double nonc) nogil
- double legacy_noncentral_f(aug_bitgen_t *aug_state, double dfnum, double dfden,
- double nonc) nogil
- double legacy_wald(aug_bitgen_t *aug_state, double mean, double scale) nogil
- double legacy_lognormal(aug_bitgen_t *aug_state, double mean, double sigma) nogil
- int64_t legacy_random_binomial(bitgen_t *bitgen_state, double p,
- int64_t n, binomial_t *binomial) nogil
- int64_t legacy_negative_binomial(aug_bitgen_t *aug_state, double n, double p) nogil
- int64_t legacy_random_hypergeometric(bitgen_t *bitgen_state, int64_t good, int64_t bad, int64_t sample) nogil
- int64_t legacy_random_logseries(bitgen_t *bitgen_state, double p) nogil
- int64_t legacy_random_poisson(bitgen_t *bitgen_state, double lam) nogil
- int64_t legacy_random_zipf(bitgen_t *bitgen_state, double a) nogil
- int64_t legacy_random_geometric(bitgen_t *bitgen_state, double p) nogil
- void legacy_random_multinomial(bitgen_t *bitgen_state, long n, long *mnix, double *pix, np.npy_intp d, binomial_t *binomial) nogil
- double legacy_standard_cauchy(aug_bitgen_t *state) nogil
- double legacy_beta(aug_bitgen_t *aug_state, double a, double b) nogil
- double legacy_f(aug_bitgen_t *aug_state, double dfnum, double dfden) nogil
- double legacy_exponential(aug_bitgen_t *aug_state, double scale) nogil
- double legacy_power(aug_bitgen_t *state, double a) nogil
diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx
index c469a4645..ec1fa352b 100644
--- a/numpy/random/mtrand.pyx
+++ b/numpy/random/mtrand.pyx
@@ -5,19 +5,99 @@ import warnings
import numpy as np
-from .bounded_integers import _integers_types
-from .mt19937 import MT19937 as _MT19937
from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer
from cpython cimport (Py_INCREF, PyFloat_AsDouble)
-from libc cimport string
-
cimport cython
cimport numpy as np
-from .bounded_integers cimport *
-from .common cimport *
-from .distributions cimport *
-from .legacy_distributions cimport *
+from libc cimport string
+from libc.stdint cimport int64_t, uint64_t
+from ._bounded_integers cimport (_rand_bool, _rand_int32, _rand_int64,
+ _rand_int16, _rand_int8, _rand_uint64, _rand_uint32, _rand_uint16,
+ _rand_uint8,)
+from ._mt19937 import MT19937 as _MT19937
+from numpy.random cimport bitgen_t
+from ._common cimport (POISSON_LAM_MAX, CONS_POSITIVE, CONS_NONE,
+ CONS_NON_NEGATIVE, CONS_BOUNDED_0_1, CONS_BOUNDED_GT_0_1, CONS_GTE_1,
+ CONS_GT_1, LEGACY_CONS_POISSON,
+ double_fill, cont, kahan_sum, cont_broadcast_3,
+ check_array_constraint, check_constraint, disc, discrete_broadcast_iii,
+ )
+
+cdef extern from "numpy/random/distributions.h":
+ struct s_binomial_t:
+ int has_binomial
+ double psave
+ int64_t nsave
+ double r
+ double q
+ double fm
+ int64_t m
+ double p1
+ double xm
+ double xl
+ double xr
+ double c
+ double laml
+ double lamr
+ double p2
+ double p3
+ double p4
+
+ ctypedef s_binomial_t binomial_t
+
+ void random_standard_uniform_fill(bitgen_t* bitgen_state, np.npy_intp cnt, double *out) nogil
+ int64_t random_positive_int(bitgen_t *bitgen_state) nogil
+ double random_uniform(bitgen_t *bitgen_state, double lower, double range) nogil
+ double random_vonmises(bitgen_t *bitgen_state, double mu, double kappa) nogil
+ double random_laplace(bitgen_t *bitgen_state, double loc, double scale) nogil
+ double random_gumbel(bitgen_t *bitgen_state, double loc, double scale) nogil
+ double random_logistic(bitgen_t *bitgen_state, double loc, double scale) nogil
+ double random_rayleigh(bitgen_t *bitgen_state, double mode) nogil
+ double random_triangular(bitgen_t *bitgen_state, double left, double mode,
+ double right) nogil
+ uint64_t random_interval(bitgen_t *bitgen_state, uint64_t max) nogil
+
+cdef extern from "include/legacy-distributions.h":
+ struct aug_bitgen:
+ bitgen_t *bit_generator
+ int has_gauss
+ double gauss
+
+ ctypedef aug_bitgen aug_bitgen_t
+
+ double legacy_gauss(aug_bitgen_t *aug_state) nogil
+ double legacy_pareto(aug_bitgen_t *aug_state, double a) nogil
+ double legacy_weibull(aug_bitgen_t *aug_state, double a) nogil
+ double legacy_standard_gamma(aug_bitgen_t *aug_state, double shape) nogil
+ double legacy_normal(aug_bitgen_t *aug_state, double loc, double scale) nogil
+ double legacy_standard_t(aug_bitgen_t *aug_state, double df) nogil
+
+ double legacy_standard_exponential(aug_bitgen_t *aug_state) nogil
+ double legacy_power(aug_bitgen_t *aug_state, double a) nogil
+ double legacy_gamma(aug_bitgen_t *aug_state, double shape, double scale) nogil
+ double legacy_power(aug_bitgen_t *aug_state, double a) nogil
+ double legacy_chisquare(aug_bitgen_t *aug_state, double df) nogil
+ double legacy_noncentral_chisquare(aug_bitgen_t *aug_state, double df,
+ double nonc) nogil
+ double legacy_noncentral_f(aug_bitgen_t *aug_state, double dfnum, double dfden,
+ double nonc) nogil
+ double legacy_wald(aug_bitgen_t *aug_state, double mean, double scale) nogil
+ double legacy_lognormal(aug_bitgen_t *aug_state, double mean, double sigma) nogil
+ int64_t legacy_random_binomial(bitgen_t *bitgen_state, double p,
+ int64_t n, binomial_t *binomial) nogil
+ int64_t legacy_negative_binomial(aug_bitgen_t *aug_state, double n, double p) nogil
+ int64_t legacy_random_hypergeometric(bitgen_t *bitgen_state, int64_t good, int64_t bad, int64_t sample) nogil
+ int64_t legacy_random_logseries(bitgen_t *bitgen_state, double p) nogil
+ int64_t legacy_random_poisson(bitgen_t *bitgen_state, double lam) nogil
+ int64_t legacy_random_zipf(bitgen_t *bitgen_state, double a) nogil
+ int64_t legacy_random_geometric(bitgen_t *bitgen_state, double p) nogil
+ void legacy_random_multinomial(bitgen_t *bitgen_state, long n, long *mnix, double *pix, np.npy_intp d, binomial_t *binomial) nogil
+ double legacy_standard_cauchy(aug_bitgen_t *state) nogil
+ double legacy_beta(aug_bitgen_t *aug_state, double a, double b) nogil
+ double legacy_f(aug_bitgen_t *aug_state, double dfnum, double dfden) nogil
+ double legacy_exponential(aug_bitgen_t *aug_state, double scale) nogil
+ double legacy_power(aug_bitgen_t *state, double a) nogil
np.import_array()
@@ -84,7 +164,7 @@ cdef class RandomState:
--------
Generator
MT19937
- :ref:`bit_generator`
+ numpy.random.BitGenerator
"""
cdef public object _bit_generator
@@ -171,6 +251,12 @@ cdef class RandomState:
For more details, see `set_state`.
+ Parameters
+ ----------
+ legacy : bool, optional
+ Flag indicating to return a legacy tuple state when the BitGenerator
+ is MT19937, instead of a dict.
+
Returns
-------
out : {tuple(str, ndarray of 624 uints, int, int, float), dict}
@@ -182,13 +268,9 @@ cdef class RandomState:
4. an integer ``has_gauss``.
5. a float ``cached_gaussian``.
- If `legacy` is False, or the BitGenerator is not NT19937, then
+ If `legacy` is False, or the BitGenerator is not MT19937, then
state is returned as a dictionary.
- legacy : bool
- Flag indicating the return a legacy tuple state when the BitGenerator
- is MT19937.
-
See Also
--------
set_state
@@ -298,6 +380,10 @@ cdef class RandomState:
(b - a) * random_sample() + a
+ .. note::
+ New code should use the ``random`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
size : int or tuple of ints, optional
@@ -311,6 +397,10 @@ cdef class RandomState:
Array of random floats of shape `size` (unless ``size=None``, in which
case a single float is returned).
+ See Also
+ --------
+ Generator.random: which should be used for new code.
+
Examples
--------
>>> np.random.random_sample()
@@ -329,7 +419,7 @@ cdef class RandomState:
"""
cdef double temp
- return double_fill(&random_double_fill, &self._bitgen, size, self.lock, None)
+ return double_fill(&random_standard_uniform_fill, &self._bitgen, size, self.lock, None)
def random(self, size=None):
"""
@@ -360,6 +450,10 @@ cdef class RandomState:
It is often seen in Bayesian inference and order statistics.
+ .. note::
+ New code should use the ``beta`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
a : float or array_like of floats
@@ -377,6 +471,9 @@ cdef class RandomState:
out : ndarray or scalar
Drawn samples from the parameterized beta distribution.
+ See Also
+ --------
+ Generator.beta: which should be used for new code.
"""
return cont(&legacy_beta, &self._aug_state, size, self.lock, 2,
a, 'a', CONS_POSITIVE,
@@ -403,6 +500,10 @@ cdef class RandomState:
the size of raindrops measured over many rainstorms [1]_, or the time
between page requests to Wikipedia [2]_.
+ .. note::
+ New code should use the ``exponential`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
scale : float or array_like of floats
@@ -419,6 +520,10 @@ cdef class RandomState:
out : ndarray or scalar
Drawn samples from the parameterized exponential distribution.
+ See Also
+ --------
+ Generator.exponential: which should be used for new code.
+
References
----------
.. [1] Peyton Z. Peebles Jr., "Probability, Random Variables and
@@ -444,6 +549,10 @@ cdef class RandomState:
`standard_exponential` is identical to the exponential distribution
with a scale parameter of 1.
+ .. note::
+ New code should use the ``standard_exponential`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
size : int or tuple of ints, optional
@@ -456,6 +565,10 @@ cdef class RandomState:
out : float or ndarray
Drawn samples.
+ See Also
+ --------
+ Generator.standard_exponential: which should be used for new code.
+
Examples
--------
Output a 3x8000 array:
@@ -474,7 +587,7 @@ cdef class RandomState:
tomaxint(size=None)
Return a sample of uniformly distributed random integers in the interval
- [0, ``np.iinfo(np.int).max``]. The np.int type translates to the C long
+ [0, ``np.iinfo(np.int_).max``]. The `np.int_` type translates to the C long
integer type and its precision is platform dependent.
Parameters
@@ -503,7 +616,7 @@ cdef class RandomState:
[ 739731006, 1947757578]],
[[1871712945, 752307660],
[1601631370, 1479324245]]])
- >>> rs.tomaxint((2,2,2)) < np.iinfo(np.int).max
+ >>> rs.tomaxint((2,2,2)) < np.iinfo(np.int_).max
array([[[ True, True],
[ True, True]],
[[ True, True],
@@ -529,7 +642,7 @@ cdef class RandomState:
def randint(self, low, high=None, size=None, dtype=int):
"""
- randint(low, high=None, size=None, dtype='l')
+ randint(low, high=None, size=None, dtype=int)
Return random integers from `low` (inclusive) to `high` (exclusive).
@@ -537,6 +650,10 @@ cdef class RandomState:
the specified dtype in the "half-open" interval [`low`, `high`). If
`high` is None (the default), then results are from [0, `low`).
+ .. note::
+ New code should use the ``integers`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
low : int or array-like of ints
@@ -552,10 +669,8 @@ cdef class RandomState:
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
dtype : dtype, optional
- Desired dtype of the result. All dtypes are determined by their
- name, i.e., 'int64', 'int', etc, so byteorder is not available
- and a specific precision may have different C types depending
- on the platform. The default value is 'np.int'.
+ Desired dtype of the result. Byteorder must be native.
+ The default value is int.
.. versionadded:: 1.11.0
@@ -567,9 +682,10 @@ cdef class RandomState:
See Also
--------
- random.random_integers : similar to `randint`, only for the closed
+ random_integers : similar to `randint`, only for the closed
interval [`low`, `high`], and 1 is the lowest value if `high` is
omitted.
+ Generator.integers: which should be used for new code.
Examples
--------
@@ -605,17 +721,16 @@ cdef class RandomState:
high = low
low = 0
- dt = np.dtype(dtype)
- key = dt.name
- if key not in _integers_types:
- raise TypeError('Unsupported dtype "%s" for randint' % key)
- if not dt.isnative:
+ _dtype = np.dtype(dtype)
+
+ if not _dtype.isnative:
# numpy 1.17.0, 2019-05-28
warnings.warn('Providing a dtype with a non-native byteorder is '
'not supported. If you require platform-independent '
'byteorder, call byteswap when required.\nIn future '
'version, providing byteorder will raise a '
'ValueError', DeprecationWarning)
+ _dtype = _dtype.newbyteorder()
# Implementation detail: the use a masked method to generate
# bounded uniform integers. Lemire's method is preferable since it is
@@ -624,26 +739,28 @@ cdef class RandomState:
cdef bint _masked = True
cdef bint _endpoint = False
- if key == 'int32':
+ if _dtype == np.int32:
ret = _rand_int32(low, high, size, _masked, _endpoint, &self._bitgen, self.lock)
- elif key == 'int64':
+ elif _dtype == np.int64:
ret = _rand_int64(low, high, size, _masked, _endpoint, &self._bitgen, self.lock)
- elif key == 'int16':
+ elif _dtype == np.int16:
ret = _rand_int16(low, high, size, _masked, _endpoint, &self._bitgen, self.lock)
- elif key == 'int8':
+ elif _dtype == np.int8:
ret = _rand_int8(low, high, size, _masked, _endpoint, &self._bitgen, self.lock)
- elif key == 'uint64':
+ elif _dtype == np.uint64:
ret = _rand_uint64(low, high, size, _masked, _endpoint, &self._bitgen, self.lock)
- elif key == 'uint32':
+ elif _dtype == np.uint32:
ret = _rand_uint32(low, high, size, _masked, _endpoint, &self._bitgen, self.lock)
- elif key == 'uint16':
+ elif _dtype == np.uint16:
ret = _rand_uint16(low, high, size, _masked, _endpoint, &self._bitgen, self.lock)
- elif key == 'uint8':
+ elif _dtype == np.uint8:
ret = _rand_uint8(low, high, size, _masked, _endpoint, &self._bitgen, self.lock)
- elif key == 'bool':
+ elif _dtype == np.bool_:
ret = _rand_bool(low, high, size, _masked, _endpoint, &self._bitgen, self.lock)
+ else:
+ raise TypeError('Unsupported dtype %r for randint' % _dtype)
- if size is None and dtype in (np.bool, np.int, np.long):
+ if size is None and dtype in (bool, int, np.compat.long):
if np.array(ret).shape == ():
return dtype(ret)
return ret
@@ -654,6 +771,10 @@ cdef class RandomState:
Return random bytes.
+ .. note::
+ New code should use the ``bytes`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
length : int
@@ -664,11 +785,14 @@ cdef class RandomState:
out : str
String of length `length`.
+ See Also
+ --------
+ Generator.bytes: which should be used for new code.
+
Examples
--------
>>> np.random.bytes(10)
' eh\\x85\\x022SZ\\xbf\\xa4' #random
-
"""
cdef Py_ssize_t n_uint32 = ((length - 1) // 4 + 1)
# Interpret the uint32s as little-endian to convert them to bytes
@@ -685,6 +809,10 @@ cdef class RandomState:
.. versionadded:: 1.7.0
+ .. note::
+ New code should use the ``choice`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
a : 1-D array-like or int
@@ -718,6 +846,7 @@ cdef class RandomState:
See Also
--------
randint, shuffle, permutation
+ Generator.choice: which should be used in new code
Examples
--------
@@ -819,7 +948,7 @@ cdef class RandomState:
raise ValueError("Cannot take a larger sample than "
"population when 'replace=False'")
elif size < 0:
- raise ValueError("negative dimensions are not allowed")
+ raise ValueError("Negative dimensions are not allowed")
if p is not None:
if np.count_nonzero(p > 0) < size:
@@ -877,6 +1006,10 @@ cdef class RandomState:
any value within the given interval is equally likely to be drawn
by `uniform`.
+ .. note::
+ New code should use the ``uniform`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
low : float or array_like of floats, optional
@@ -884,7 +1017,7 @@ cdef class RandomState:
greater than or equal to low. The default value is 0.
high : float or array_like of floats
Upper boundary of the output interval. All values generated will be
- less than high. The default value is 1.0.
+ less than or equal to high. The default value is 1.0.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
@@ -906,6 +1039,7 @@ cdef class RandomState:
rand : Convenience function that accepts dimensions as input, e.g.,
``rand(2,2)`` would generate a 2-by-2 array of floats,
uniformly distributed over ``[0, 1)``.
+ Generator.uniform: which should be used for new code.
Notes
-----
@@ -919,7 +1053,14 @@ cdef class RandomState:
If ``high`` < ``low``, the results are officially undefined
and may eventually raise an error, i.e. do not rely on this
function to behave when passed arguments satisfying that
- inequality condition.
+ inequality condition. The ``high`` limit may be included in the
+ returned array of floats due to floating-point rounding in the
+ equation ``low + (high-low) * random_sample()``. For example:
+
+ >>> x = np.float32(5*0.99999999)
+ >>> x
+ 5.0
+
Examples
--------
@@ -985,7 +1126,7 @@ cdef class RandomState:
.. note::
This is a convenience function for users porting code from Matlab,
- and wraps `numpy.random.random_sample`. That function takes a
+ and wraps `random_sample`. That function takes a
tuple to specify the size of the output, which is consistent with
other NumPy functions like `numpy.zeros` and `numpy.ones`.
@@ -1029,10 +1170,14 @@ cdef class RandomState:
.. note::
This is a convenience function for users porting code from Matlab,
- and wraps `numpy.random.standard_normal`. That function takes a
+ and wraps `standard_normal`. That function takes a
tuple to specify the size of the output, which is consistent with
other NumPy functions like `numpy.zeros` and `numpy.ones`.
+ .. note::
+ New code should use the ``standard_normal`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
If positive int_like arguments are provided, `randn` generates an array
of shape ``(d0, d1, ..., dn)``, filled
with random floats sampled from a univariate "normal" (Gaussian)
@@ -1056,6 +1201,7 @@ cdef class RandomState:
--------
standard_normal : Similar, but takes a tuple as its argument.
normal : Also accepts mu and sigma arguments.
+ Generator.standard_normal: which should be used for new code.
Notes
-----
@@ -1084,11 +1230,11 @@ cdef class RandomState:
"""
random_integers(low, high=None, size=None)
- Random integers of type np.int between `low` and `high`, inclusive.
+ Random integers of type `np.int_` between `low` and `high`, inclusive.
- Return random integers of type np.int from the "discrete uniform"
+ Return random integers of type `np.int_` from the "discrete uniform"
distribution in the closed interval [`low`, `high`]. If `high` is
- None (the default), then results are from [1, `low`]. The np.int
+ None (the default), then results are from [1, `low`]. The `np.int_`
type translates to the C long integer type and its precision
is platform dependent.
@@ -1182,6 +1328,10 @@ cdef class RandomState:
Draw samples from a standard Normal distribution (mean=0, stdev=1).
+ .. note::
+ New code should use the ``standard_normal`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
size : int or tuple of ints, optional
@@ -1195,6 +1345,13 @@ cdef class RandomState:
A floating-point array of shape ``size`` of drawn samples, or a
single sample if ``size`` was not specified.
+ See Also
+ --------
+ normal :
+ Equivalent function with additional ``loc`` and ``scale`` arguments
+ for setting the mean and standard deviation.
+ Generator.standard_normal: which should be used for new code.
+
Notes
-----
For random samples from :math:`N(\\mu, \\sigma^2)`, use one of::
@@ -1202,12 +1359,6 @@ cdef class RandomState:
mu + sigma * np.random.standard_normal(size=...)
np.random.normal(mu, sigma, size=...)
- See Also
- --------
- normal :
- Equivalent function with additional ``loc`` and ``scale`` arguments
- for setting the mean and standard deviation.
-
Examples
--------
>>> np.random.standard_normal()
@@ -1252,6 +1403,10 @@ cdef class RandomState:
by a large number of tiny, random disturbances, each with its own
unique distribution [2]_.
+ .. note::
+ New code should use the ``normal`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
loc : float or array_like of floats
@@ -1274,6 +1429,7 @@ cdef class RandomState:
--------
scipy.stats.norm : probability density function, distribution or
cumulative density function, etc.
+ Generator.normal: which should be used for new code.
Notes
-----
@@ -1289,8 +1445,8 @@ cdef class RandomState:
The function has its peak at the mean, and its "spread" increases with
the standard deviation (the function reaches 0.607 times its maximum at
:math:`x + \\sigma` and :math:`x - \\sigma` [2]_). This implies that
- `numpy.random.normal` is more likely to return samples lying close to
- the mean, rather than those far away.
+ normal is more likely to return samples lying close to the mean, rather
+ than those far away.
References
----------
@@ -1347,6 +1503,10 @@ cdef class RandomState:
Samples are drawn from a Gamma distribution with specified parameters,
shape (sometimes designated "k") and scale=1.
+ .. note::
+ New code should use the ``standard_gamma`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
shape : float or array_like of floats
@@ -1366,6 +1526,7 @@ cdef class RandomState:
--------
scipy.stats.gamma : probability density function, distribution or
cumulative density function, etc.
+ Generator.standard_gamma: which should be used for new code.
Notes
-----
@@ -1423,6 +1584,10 @@ cdef class RandomState:
`shape` (sometimes designated "k") and `scale` (sometimes designated
"theta"), where both parameters are > 0.
+ .. note::
+ New code should use the ``gamma`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
shape : float or array_like of floats
@@ -1445,6 +1610,7 @@ cdef class RandomState:
--------
scipy.stats.gamma : probability density function, distribution or
cumulative density function, etc.
+ Generator.gamma: which should be used for new code.
Notes
-----
@@ -1507,6 +1673,10 @@ cdef class RandomState:
that arises in ANOVA tests, and is the ratio of two chi-square
variates.
+ .. note::
+ New code should use the ``f`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
dfnum : float or array_like of floats
@@ -1528,6 +1698,7 @@ cdef class RandomState:
--------
scipy.stats.f : probability density function, distribution or
cumulative density function, etc.
+ Generator.f: which should be used for new code.
Notes
-----
@@ -1590,6 +1761,10 @@ cdef class RandomState:
freedom in denominator), where both parameters > 1.
`nonc` is the non-centrality parameter.
+ .. note::
+ New code should use the ``noncentral_f`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
dfnum : float or array_like of floats
@@ -1614,6 +1789,10 @@ cdef class RandomState:
out : ndarray or scalar
Drawn samples from the parameterized noncentral Fisher distribution.
+ See Also
+ --------
+ Generator.noncentral_f: which should be used for new code.
+
Notes
-----
When calculating the power of an experiment (power = probability of
@@ -1667,6 +1846,10 @@ cdef class RandomState:
resulting distribution is chi-square (see Notes). This distribution
is often used in hypothesis testing.
+ .. note::
+ New code should use the ``chisquare`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
df : float or array_like of floats
@@ -1688,6 +1871,10 @@ cdef class RandomState:
When `df` <= 0 or when an inappropriate `size` (e.g. ``size=-1``)
is given.
+ See Also
+ --------
+ Generator.chisquare: which should be used for new code.
+
Notes
-----
The variable obtained by summing the squares of `df` independent,
@@ -1717,7 +1904,6 @@ cdef class RandomState:
--------
>>> np.random.chisquare(2,4)
array([ 1.89920014, 9.00867716, 3.13710533, 5.62318272]) # random
-
"""
return cont(&legacy_chisquare, &self._aug_state, size, self.lock, 1,
df, 'df', CONS_POSITIVE,
@@ -1733,6 +1919,10 @@ cdef class RandomState:
The noncentral :math:`\\chi^2` distribution is a generalization of
the :math:`\\chi^2` distribution.
+ .. note::
+ New code should use the ``noncentral_chisquare`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
df : float or array_like of floats
@@ -1753,6 +1943,10 @@ cdef class RandomState:
out : ndarray or scalar
Drawn samples from the parameterized noncentral chi-square distribution.
+ See Also
+ --------
+ Generator.noncentral_chisquare: which should be used for new code.
+
Notes
-----
The probability density function for the noncentral Chi-square
@@ -1811,6 +2005,10 @@ cdef class RandomState:
Also known as the Lorentz distribution.
+ .. note::
+ New code should use the ``standard_cauchy`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
size : int or tuple of ints, optional
@@ -1823,6 +2021,10 @@ cdef class RandomState:
samples : ndarray or scalar
The drawn samples.
+ See Also
+ --------
+ Generator.standard_cauchy: which should be used for new code.
+
Notes
-----
The probability density function for the full Cauchy distribution is
@@ -1879,6 +2081,10 @@ cdef class RandomState:
large, the result resembles that of the standard normal
distribution (`standard_normal`).
+ .. note::
+ New code should use the ``standard_t`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
df : float or array_like of floats
@@ -1894,6 +2100,10 @@ cdef class RandomState:
out : ndarray or scalar
Drawn samples from the parameterized standard Student's t distribution.
+ See Also
+ --------
+ Generator.standard_t: which should be used for new code.
+
Notes
-----
The probability density function for the t distribution is
@@ -1976,6 +2186,10 @@ cdef class RandomState:
circle. It may be thought of as the circular analogue of the normal
distribution.
+ .. note::
+ New code should use the ``vonmises`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
mu : float or array_like of floats
@@ -1997,6 +2211,7 @@ cdef class RandomState:
--------
scipy.stats.vonmises : probability density function, distribution, or
cumulative density function, etc.
+ Generator.vonmises: which should be used for new code.
Notes
-----
@@ -2069,6 +2284,10 @@ cdef class RandomState:
20 percent of the range, while the other 20 percent fill the
remaining 80 percent of the range.
+ .. note::
+ New code should use the ``pareto`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
a : float or array_like of floats
@@ -2090,6 +2309,7 @@ cdef class RandomState:
cumulative density function, etc.
scipy.stats.genpareto : probability density function, distribution or
cumulative density function, etc.
+ Generator.pareto: which should be used for new code.
Notes
-----
@@ -2158,6 +2378,10 @@ cdef class RandomState:
The more common 2-parameter Weibull, including a scale parameter
:math:`\\lambda` is just :math:`X = \\lambda(-ln(U))^{1/a}`.
+ .. note::
+ New code should use the ``weibull`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
a : float or array_like of floats
@@ -2179,6 +2403,7 @@ cdef class RandomState:
scipy.stats.weibull_min
scipy.stats.genextreme
gumbel
+ Generator.weibull: which should be used for new code.
Notes
-----
@@ -2249,6 +2474,10 @@ cdef class RandomState:
Also known as the power function distribution.
+ .. note::
+ New code should use the ``power`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
a : float or array_like of floats
@@ -2269,6 +2498,10 @@ cdef class RandomState:
ValueError
If a < 1.
+ See Also
+ --------
+ Generator.power: which should be used for new code.
+
Notes
-----
The probability density function is
@@ -2352,6 +2585,10 @@ cdef class RandomState:
difference between two independent, identically distributed exponential
random variables.
+ .. note::
+ New code should use the ``laplace`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
loc : float or array_like of floats, optional
@@ -2370,6 +2607,10 @@ cdef class RandomState:
out : ndarray or scalar
Drawn samples from the parameterized Laplace distribution.
+ See Also
+ --------
+ Generator.laplace: which should be used for new code.
+
Notes
-----
It has the probability density function
@@ -2435,6 +2676,10 @@ cdef class RandomState:
scale. For more information on the Gumbel distribution, see
Notes and References below.
+ .. note::
+ New code should use the ``gumbel`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
loc : float or array_like of floats, optional
@@ -2459,6 +2704,7 @@ cdef class RandomState:
scipy.stats.gumbel_r
scipy.stats.genextreme
weibull
+ Generator.gumbel: which should be used for new code.
Notes
-----
@@ -2552,6 +2798,10 @@ cdef class RandomState:
Samples are drawn from a logistic distribution with specified
parameters, loc (location or mean, also median), and scale (>0).
+ .. note::
+ New code should use the ``logistic`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
loc : float or array_like of floats, optional
@@ -2574,6 +2824,7 @@ cdef class RandomState:
--------
scipy.stats.logistic : probability density function, distribution or
cumulative density function, etc.
+ Generator.logistic: which should be used for new code.
Notes
-----
@@ -2634,6 +2885,10 @@ cdef class RandomState:
deviation are not the values for the distribution itself, but of the
underlying normal distribution it is derived from.
+ .. note::
+ New code should use the ``lognormal`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
mean : float or array_like of floats, optional
@@ -2656,6 +2911,7 @@ cdef class RandomState:
--------
scipy.stats.lognorm : probability density function, distribution,
cumulative density function, etc.
+ Generator.lognormal: which should be used for new code.
Notes
-----
@@ -2742,6 +2998,10 @@ cdef class RandomState:
The :math:`\\chi` and Weibull distributions are generalizations of the
Rayleigh.
+ .. note::
+ New code should use the ``rayleigh`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
scale : float or array_like of floats, optional
@@ -2757,6 +3017,10 @@ cdef class RandomState:
out : ndarray or scalar
Drawn samples from the parameterized Rayleigh distribution.
+ See Also
+ --------
+ Generator.rayleigh: which should be used for new code.
+
Notes
-----
The probability density function for the Rayleigh distribution is
@@ -2816,6 +3080,10 @@ cdef class RandomState:
because there is an inverse relationship between the time to cover a
unit distance and distance covered in unit time.
+ .. note::
+ New code should use the ``wald`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
mean : float or array_like of floats
@@ -2833,6 +3101,10 @@ cdef class RandomState:
out : ndarray or scalar
Drawn samples from the parameterized Wald distribution.
+ See Also
+ --------
+ Generator.wald: which should be used for new code.
+
Notes
-----
The probability density function for the Wald distribution is
@@ -2881,6 +3153,10 @@ cdef class RandomState:
limit right. Unlike the other distributions, these parameters
directly define the shape of the pdf.
+ .. note::
+ New code should use the ``triangular`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
left : float or array_like of floats
@@ -2902,6 +3178,10 @@ cdef class RandomState:
out : ndarray or scalar
Drawn samples from the parameterized triangular distribution.
+ See Also
+ --------
+ Generator.triangular: which should be used for new code.
+
Notes
-----
The probability density function for the triangular distribution is
@@ -2980,6 +3260,10 @@ cdef class RandomState:
n an integer >= 0 and p is in the interval [0,1]. (n may be
input as a float, but it is truncated to an integer in use)
+ .. note::
+ New code should use the ``binomial`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
n : int or array_like of ints
@@ -3003,6 +3287,7 @@ cdef class RandomState:
--------
scipy.stats.binom : probability density function, distribution or
cumulative density function, etc.
+ Generator.binomial: which should be used for new code.
Notes
-----
@@ -3078,7 +3363,6 @@ cdef class RandomState:
it = np.PyArray_MultiIterNew2(p_arr, n_arr)
randoms = <np.ndarray>np.empty(it.shape, int)
- randoms_data = <long *>np.PyArray_DATA(randoms)
cnt = np.PyArray_SIZE(randoms)
it = np.PyArray_MultiIterNew3(randoms, p_arr, n_arr)
@@ -3125,6 +3409,10 @@ cdef class RandomState:
parameters, `n` successes and `p` probability of success where `n`
is > 0 and `p` is in the interval [0, 1].
+ .. note::
+ New code should use the ``negative_binomial`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
n : float or array_like of floats
@@ -3144,6 +3432,10 @@ cdef class RandomState:
where each sample is equal to N, the number of failures that
occurred before a total of n successes was reached.
+ See Also
+ --------
+ Generator.negative_binomial: which should be used for new code.
+
Notes
-----
The probability mass function of the negative binomial distribution is
@@ -3202,6 +3494,10 @@ cdef class RandomState:
The Poisson distribution is the limit of the binomial distribution
for large N.
+ .. note::
+ New code should use the ``poisson`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
lam : float or array_like of floats
@@ -3218,6 +3514,10 @@ cdef class RandomState:
out : ndarray or scalar
Drawn samples from the parameterized Poisson distribution.
+ See Also
+ --------
+ Generator.poisson: which should be used for new code.
+
Notes
-----
The Poisson distribution
@@ -3280,6 +3580,10 @@ cdef class RandomState:
frequency of an item is inversely proportional to its rank in a
frequency table.
+ .. note::
+ New code should use the ``zipf`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
a : float or array_like of floats
@@ -3299,6 +3603,7 @@ cdef class RandomState:
--------
scipy.stats.zipf : probability density function, distribution, or
cumulative density function, etc.
+ Generator.zipf: which should be used for new code.
Notes
-----
@@ -3365,6 +3670,10 @@ cdef class RandomState:
where `p` is the probability of success of an individual trial.
+ .. note::
+ New code should use the ``geometric`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
p : float or array_like of floats
@@ -3380,6 +3689,10 @@ cdef class RandomState:
out : ndarray or scalar
Drawn samples from the parameterized geometric distribution.
+ See Also
+ --------
+ Generator.geometric: which should be used for new code.
+
Examples
--------
Draw ten thousand values from the geometric distribution,
@@ -3411,6 +3724,10 @@ cdef class RandomState:
a bad selection), and `nsample` (number of items sampled, which is less
than or equal to the sum ``ngood + nbad``).
+ .. note::
+ New code should use the ``hypergeometric`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
ngood : int or array_like of ints
@@ -3438,6 +3755,7 @@ cdef class RandomState:
--------
scipy.stats.hypergeom : probability density function, distribution or
cumulative density function, etc.
+ Generator.hypergeometric: which should be used for new code.
Notes
-----
@@ -3537,6 +3855,10 @@ cdef class RandomState:
Samples are drawn from a log series distribution with specified
shape parameter, 0 < ``p`` < 1.
+ .. note::
+ New code should use the ``logseries`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
p : float or array_like of floats
@@ -3556,6 +3878,7 @@ cdef class RandomState:
--------
scipy.stats.logser : probability density function, distribution or
cumulative density function, etc.
+ Generator.logseries: which should be used for new code.
Notes
-----
@@ -3625,6 +3948,10 @@ cdef class RandomState:
(average or "center") and variance (standard deviation, or "width,"
squared) of the one-dimensional normal distribution.
+ .. note::
+ New code should use the ``multivariate_normal`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
mean : 1-D array_like, of length N
@@ -3652,6 +3979,10 @@ cdef class RandomState:
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
+ See Also
+ --------
+ Generator.multivariate_normal: which should be used for new code.
+
Notes
-----
The mean is a coordinate in N-dimensional space, which represents the
@@ -3791,6 +4122,10 @@ cdef class RandomState:
``X_i = [X_0, X_1, ..., X_p]``, represent the number of times the
outcome was ``i``.
+ .. note::
+ New code should use the ``multinomial`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
n : int
@@ -3814,6 +4149,10 @@ cdef class RandomState:
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
+ See Also
+ --------
+ Generator.multinomial: which should be used for new code.
+
Examples
--------
Throw a dice 20 times:
@@ -3860,8 +4199,8 @@ cdef class RandomState:
cdef long ni
d = len(pvals)
- parr = <np.ndarray>np.PyArray_FROM_OTF(
- pvals, np.NPY_DOUBLE, np.NPY_ALIGNED | np.NPY_ARRAY_C_CONTIGUOUS)
+ parr = <np.ndarray>np.PyArray_FROMANY(
+ pvals, np.NPY_DOUBLE, 1, 1, np.NPY_ARRAY_ALIGNED | np.NPY_ARRAY_C_CONTIGUOUS)
pix = <double*>np.PyArray_DATA(parr)
check_array_constraint(parr, 'pvals', CONS_BOUNDED_0_1)
if kahan_sum(pix, d-1) > (1.0 + 1e-12):
@@ -3901,6 +4240,10 @@ cdef class RandomState:
is a conjugate prior of a multinomial distribution in Bayesian
inference.
+ .. note::
+ New code should use the ``dirichlet`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
alpha : array
@@ -3921,6 +4264,10 @@ cdef class RandomState:
ValueError
If any value in alpha is less than or equal to zero
+ See Also
+ --------
+ Generator.dirichlet: which should be used for new code.
+
Notes
-----
The Dirichlet distribution is a distribution over vectors
@@ -4038,6 +4385,10 @@ cdef class RandomState:
multi-dimensional array. The order of sub-arrays is changed but
their contents remains the same.
+ .. note::
+ New code should use the ``shuffle`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
x : array_like
@@ -4047,6 +4398,10 @@ cdef class RandomState:
-------
None
+ See Also
+ --------
+ Generator.shuffle: which should be used for new code.
+
Examples
--------
>>> arr = np.arange(10)
@@ -4125,6 +4480,10 @@ cdef class RandomState:
If `x` is a multi-dimensional array, it is only shuffled along its
first index.
+ .. note::
+ New code should use the ``permutation`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
x : int or array_like
@@ -4137,6 +4496,9 @@ cdef class RandomState:
out : ndarray
Permuted sequence or array range.
+ See Also
+ --------
+ Generator.permutation: which should be used for new code.
Examples
--------
diff --git a/numpy/random/setup.py b/numpy/random/setup.py
index ce7f0565f..5d6ff2c8b 100644
--- a/numpy/random/setup.py
+++ b/numpy/random/setup.py
@@ -1,5 +1,3 @@
-from __future__ import division, print_function
-
import os
import platform
import sys
@@ -34,10 +32,13 @@ def configuration(parent_package='', top_path=None):
defs.append(('NPY_NO_DEPRECATED_API', 0))
config.add_data_dir('tests')
+ config.add_data_dir('_examples')
EXTRA_LINK_ARGS = []
- # Math lib
- EXTRA_LIBRARIES = ['m'] if os.name != 'nt' else []
+ EXTRA_LIBRARIES = ['npyrandom']
+ if os.name != 'nt':
+ # Math lib
+ EXTRA_LIBRARIES.append('m')
# Some bit generators exclude GCC inlining
EXTRA_COMPILE_ARGS = ['-U__GNUC_GNU_INLINE__']
@@ -47,85 +48,94 @@ def configuration(parent_package='', top_path=None):
elif not is_msvc:
# Some bit generators require c99
EXTRA_COMPILE_ARGS += ['-std=c99']
- INTEL_LIKE = any(arch in platform.machine()
- for arch in ('x86', 'i686', 'i386', 'amd64'))
- if INTEL_LIKE:
- # Assumes GCC or GCC-like compiler
- EXTRA_COMPILE_ARGS += ['-msse2']
# Use legacy integer variable sizes
LEGACY_DEFS = [('NP_RANDOM_LEGACY', '1')]
PCG64_DEFS = []
# One can force emulated 128-bit arithmetic if one wants.
#PCG64_DEFS += [('PCG_FORCE_EMULATED_128BIT_MATH', '1')]
+ depends = ['__init__.pxd', 'c_distributions.pxd', 'bit_generator.pxd']
+
+ # npyrandom - a library like npymath
+ npyrandom_sources = [
+ 'src/distributions/logfactorial.c',
+ 'src/distributions/distributions.c',
+ 'src/distributions/random_mvhg_count.c',
+ 'src/distributions/random_mvhg_marginals.c',
+ 'src/distributions/random_hypergeometric.c',
+ ]
+ config.add_installed_library('npyrandom',
+ sources=npyrandom_sources,
+ install_dir='lib',
+ build_info={
+ 'include_dirs' : [], # empty list required for creating npyrandom.h
+ 'extra_compiler_args' : (['/GL-'] if is_msvc else []),
+ })
for gen in ['mt19937']:
# gen.pyx, src/gen/gen.c, src/gen/gen-jump.c
- config.add_extension(gen,
- sources=['{0}.c'.format(gen),
- 'src/{0}/{0}.c'.format(gen),
- 'src/{0}/{0}-jump.c'.format(gen)],
+ config.add_extension(f'_{gen}',
+ sources=[f'_{gen}.c',
+ f'src/{gen}/{gen}.c',
+ f'src/{gen}/{gen}-jump.c'],
include_dirs=['.', 'src', join('src', gen)],
libraries=EXTRA_LIBRARIES,
extra_compile_args=EXTRA_COMPILE_ARGS,
extra_link_args=EXTRA_LINK_ARGS,
- depends=['%s.pyx' % gen],
+ depends=depends + [f'_{gen}.pyx'],
define_macros=defs,
)
for gen in ['philox', 'pcg64', 'sfc64']:
# gen.pyx, src/gen/gen.c
_defs = defs + PCG64_DEFS if gen == 'pcg64' else defs
- config.add_extension(gen,
- sources=['{0}.c'.format(gen),
- 'src/{0}/{0}.c'.format(gen)],
+ config.add_extension(f'_{gen}',
+ sources=[f'_{gen}.c',
+ f'src/{gen}/{gen}.c'],
include_dirs=['.', 'src', join('src', gen)],
libraries=EXTRA_LIBRARIES,
extra_compile_args=EXTRA_COMPILE_ARGS,
extra_link_args=EXTRA_LINK_ARGS,
- depends=['%s.pyx' % gen, 'bit_generator.pyx',
- 'bit_generator.pxd'],
+ depends=depends + [f'_{gen}.pyx',
+ 'bit_generator.pyx', 'bit_generator.pxd'],
define_macros=_defs,
)
- for gen in ['common', 'bit_generator']:
+ for gen in ['_common', 'bit_generator']:
# gen.pyx
config.add_extension(gen,
- sources=['{0}.c'.format(gen)],
+ sources=[f'{gen}.c'],
libraries=EXTRA_LIBRARIES,
extra_compile_args=EXTRA_COMPILE_ARGS,
extra_link_args=EXTRA_LINK_ARGS,
include_dirs=['.', 'src'],
- depends=['%s.pyx' % gen, '%s.pxd' % gen,],
+ depends=depends + [f'{gen}.pyx', f'{gen}.pxd',],
define_macros=defs,
)
- other_srcs = [
- 'src/distributions/logfactorial.c',
- 'src/distributions/distributions.c',
- 'src/distributions/random_hypergeometric.c',
- ]
- for gen in ['generator', 'bounded_integers']:
+    config.add_data_files(f'{gen}.pxd')
+ for gen in ['_generator', '_bounded_integers']:
# gen.pyx, src/distributions/distributions.c
config.add_extension(gen,
- sources=['{0}.c'.format(gen)] + other_srcs,
+ sources=[f'{gen}.c'],
libraries=EXTRA_LIBRARIES,
extra_compile_args=EXTRA_COMPILE_ARGS,
include_dirs=['.', 'src'],
extra_link_args=EXTRA_LINK_ARGS,
- depends=['%s.pyx' % gen],
+ depends=depends + [f'{gen}.pyx'],
define_macros=defs,
)
+ config.add_data_files('_bounded_integers.pxd')
config.add_extension('mtrand',
- # mtrand does not depend on random_hypergeometric.c.
sources=['mtrand.c',
'src/legacy/legacy-distributions.c',
- 'src/distributions/logfactorial.c',
- 'src/distributions/distributions.c'],
+ 'src/distributions/distributions.c',
+ ],
include_dirs=['.', 'src', 'src/legacy'],
- libraries=EXTRA_LIBRARIES,
+ libraries=['m'] if os.name != 'nt' else [],
extra_compile_args=EXTRA_COMPILE_ARGS,
extra_link_args=EXTRA_LINK_ARGS,
- depends=['mtrand.pyx'],
+ depends=depends + ['mtrand.pyx'],
define_macros=defs + LEGACY_DEFS,
)
+ config.add_data_files(*depends)
return config
diff --git a/numpy/random/src/aligned_malloc/aligned_malloc.c b/numpy/random/src/aligned_malloc/aligned_malloc.c
deleted file mode 100644
index 6e8192cfb..000000000
--- a/numpy/random/src/aligned_malloc/aligned_malloc.c
+++ /dev/null
@@ -1,9 +0,0 @@
-#include "aligned_malloc.h"
-
-static NPY_INLINE void *PyArray_realloc_aligned(void *p, size_t n);
-
-static NPY_INLINE void *PyArray_malloc_aligned(size_t n);
-
-static NPY_INLINE void *PyArray_calloc_aligned(size_t n, size_t s);
-
-static NPY_INLINE void PyArray_free_aligned(void *p); \ No newline at end of file
diff --git a/numpy/random/src/distributions/distributions.c b/numpy/random/src/distributions/distributions.c
index 1244ffe65..0b46dc6d8 100644
--- a/numpy/random/src/distributions/distributions.c
+++ b/numpy/random/src/distributions/distributions.c
@@ -1,4 +1,4 @@
-#include "distributions.h"
+#include "numpy/random/distributions.h"
#include "ziggurat_constants.h"
#include "logfactorial.h"
@@ -6,92 +6,42 @@
#include <intrin.h>
#endif
-/* Random generators for external use */
-float random_float(bitgen_t *bitgen_state) { return next_float(bitgen_state); }
-
-double random_double(bitgen_t *bitgen_state) {
- return next_double(bitgen_state);
+/* Inline generators for internal use */
+static NPY_INLINE uint32_t next_uint32(bitgen_t *bitgen_state) {
+ return bitgen_state->next_uint32(bitgen_state->state);
}
-
-static NPY_INLINE double next_standard_exponential(bitgen_t *bitgen_state) {
- return -log(1.0 - next_double(bitgen_state));
+static NPY_INLINE uint64_t next_uint64(bitgen_t *bitgen_state) {
+ return bitgen_state->next_uint64(bitgen_state->state);
}
-double random_standard_exponential(bitgen_t *bitgen_state) {
- return next_standard_exponential(bitgen_state);
+static NPY_INLINE float next_float(bitgen_t *bitgen_state) {
+ return (next_uint32(bitgen_state) >> 9) * (1.0f / 8388608.0f);
}
-void random_standard_exponential_fill(bitgen_t *bitgen_state, npy_intp cnt,
- double *out) {
- npy_intp i;
- for (i = 0; i < cnt; i++) {
- out[i] = next_standard_exponential(bitgen_state);
- }
+/* Random generators for external use */
+float random_standard_uniform_f(bitgen_t *bitgen_state) {
+ return next_float(bitgen_state);
}
-float random_standard_exponential_f(bitgen_t *bitgen_state) {
- return -logf(1.0f - next_float(bitgen_state));
+double random_standard_uniform(bitgen_t *bitgen_state) {
+ return next_double(bitgen_state);
}
-void random_double_fill(bitgen_t *bitgen_state, npy_intp cnt, double *out) {
+void random_standard_uniform_fill(bitgen_t *bitgen_state, npy_intp cnt, double *out) {
npy_intp i;
for (i = 0; i < cnt; i++) {
out[i] = next_double(bitgen_state);
}
}
-#if 0
-double random_gauss(bitgen_t *bitgen_state) {
- if (bitgen_state->has_gauss) {
- const double temp = bitgen_state->gauss;
- bitgen_state->has_gauss = false;
- bitgen_state->gauss = 0.0;
- return temp;
- } else {
- double f, x1, x2, r2;
-
- do {
- x1 = 2.0 * next_double(bitgen_state) - 1.0;
- x2 = 2.0 * next_double(bitgen_state) - 1.0;
- r2 = x1 * x1 + x2 * x2;
- } while (r2 >= 1.0 || r2 == 0.0);
-
- /* Polar method, a more efficient version of the Box-Muller approach. */
- f = sqrt(-2.0 * log(r2) / r2);
- /* Keep for next call */
- bitgen_state->gauss = f * x1;
- bitgen_state->has_gauss = true;
- return f * x2;
- }
-}
-float random_gauss_f(bitgen_t *bitgen_state) {
- if (bitgen_state->has_gauss_f) {
- const float temp = bitgen_state->gauss_f;
- bitgen_state->has_gauss_f = false;
- bitgen_state->gauss_f = 0.0f;
- return temp;
- } else {
- float f, x1, x2, r2;
-
- do {
- x1 = 2.0f * next_float(bitgen_state) - 1.0f;
- x2 = 2.0f * next_float(bitgen_state) - 1.0f;
- r2 = x1 * x1 + x2 * x2;
- } while (r2 >= 1.0 || r2 == 0.0);
-
- /* Polar method, a more efficient version of the Box-Muller approach. */
- f = sqrtf(-2.0f * logf(r2) / r2);
- /* Keep for next call */
- bitgen_state->gauss_f = f * x1;
- bitgen_state->has_gauss_f = true;
- return f * x2;
+void random_standard_uniform_fill_f(bitgen_t *bitgen_state, npy_intp cnt, float *out) {
+ npy_intp i;
+ for (i = 0; i < cnt; i++) {
+ out[i] = next_float(bitgen_state);
}
}
-#endif
-
-static NPY_INLINE double standard_exponential_zig(bitgen_t *bitgen_state);
-static double standard_exponential_zig_unlikely(bitgen_t *bitgen_state,
+static double standard_exponential_unlikely(bitgen_t *bitgen_state,
uint8_t idx, double x) {
if (idx == 0) {
/* Switch to 1.0 - U to avoid log(0.0), see GH 13361 */
@@ -101,11 +51,11 @@ static double standard_exponential_zig_unlikely(bitgen_t *bitgen_state,
exp(-x)) {
return x;
} else {
- return standard_exponential_zig(bitgen_state);
+ return random_standard_exponential(bitgen_state);
}
}
-static NPY_INLINE double standard_exponential_zig(bitgen_t *bitgen_state) {
+double random_standard_exponential(bitgen_t *bitgen_state) {
uint64_t ri;
uint8_t idx;
double x;
@@ -117,24 +67,18 @@ static NPY_INLINE double standard_exponential_zig(bitgen_t *bitgen_state) {
if (ri < ke_double[idx]) {
return x; /* 98.9% of the time we return here 1st try */
}
- return standard_exponential_zig_unlikely(bitgen_state, idx, x);
+ return standard_exponential_unlikely(bitgen_state, idx, x);
}
-double random_standard_exponential_zig(bitgen_t *bitgen_state) {
- return standard_exponential_zig(bitgen_state);
-}
-
-void random_standard_exponential_zig_fill(bitgen_t *bitgen_state, npy_intp cnt,
- double *out) {
+void random_standard_exponential_fill(bitgen_t * bitgen_state, npy_intp cnt, double * out)
+{
npy_intp i;
for (i = 0; i < cnt; i++) {
- out[i] = standard_exponential_zig(bitgen_state);
+ out[i] = random_standard_exponential(bitgen_state);
}
}
-static NPY_INLINE float standard_exponential_zig_f(bitgen_t *bitgen_state);
-
-static float standard_exponential_zig_unlikely_f(bitgen_t *bitgen_state,
+static float standard_exponential_unlikely_f(bitgen_t *bitgen_state,
uint8_t idx, float x) {
if (idx == 0) {
/* Switch to 1.0 - U to avoid log(0.0), see GH 13361 */
@@ -144,11 +88,11 @@ static float standard_exponential_zig_unlikely_f(bitgen_t *bitgen_state,
expf(-x)) {
return x;
} else {
- return standard_exponential_zig_f(bitgen_state);
+ return random_standard_exponential_f(bitgen_state);
}
}
-static NPY_INLINE float standard_exponential_zig_f(bitgen_t *bitgen_state) {
+float random_standard_exponential_f(bitgen_t *bitgen_state) {
uint32_t ri;
uint8_t idx;
float x;
@@ -160,14 +104,35 @@ static NPY_INLINE float standard_exponential_zig_f(bitgen_t *bitgen_state) {
if (ri < ke_float[idx]) {
return x; /* 98.9% of the time we return here 1st try */
}
- return standard_exponential_zig_unlikely_f(bitgen_state, idx, x);
+ return standard_exponential_unlikely_f(bitgen_state, idx, x);
+}
+
+void random_standard_exponential_fill_f(bitgen_t * bitgen_state, npy_intp cnt, float * out)
+{
+ npy_intp i;
+ for (i = 0; i < cnt; i++) {
+ out[i] = random_standard_exponential_f(bitgen_state);
+ }
+}
+
+void random_standard_exponential_inv_fill(bitgen_t * bitgen_state, npy_intp cnt, double * out)
+{
+ npy_intp i;
+ for (i = 0; i < cnt; i++) {
+ out[i] = -log(1.0 - next_double(bitgen_state));
+ }
}
-float random_standard_exponential_zig_f(bitgen_t *bitgen_state) {
- return standard_exponential_zig_f(bitgen_state);
+void random_standard_exponential_inv_fill_f(bitgen_t * bitgen_state, npy_intp cnt, float * out)
+{
+ npy_intp i;
+ for (i = 0; i < cnt; i++) {
+    out[i] = -logf(1.0f - next_float(bitgen_state));
+ }
}
-static NPY_INLINE double next_gauss_zig(bitgen_t *bitgen_state) {
+
+double random_standard_normal(bitgen_t *bitgen_state) {
uint64_t r;
int sign;
uint64_t rabs;
@@ -202,18 +167,14 @@ static NPY_INLINE double next_gauss_zig(bitgen_t *bitgen_state) {
}
}
-double random_gauss_zig(bitgen_t *bitgen_state) {
- return next_gauss_zig(bitgen_state);
-}
-
-void random_gauss_zig_fill(bitgen_t *bitgen_state, npy_intp cnt, double *out) {
+void random_standard_normal_fill(bitgen_t *bitgen_state, npy_intp cnt, double *out) {
npy_intp i;
for (i = 0; i < cnt; i++) {
- out[i] = next_gauss_zig(bitgen_state);
+ out[i] = random_standard_normal(bitgen_state);
}
}
-float random_gauss_zig_f(bitgen_t *bitgen_state) {
+float random_standard_normal_f(bitgen_t *bitgen_state) {
uint32_t r;
int sign;
uint32_t rabs;
@@ -247,113 +208,26 @@ float random_gauss_zig_f(bitgen_t *bitgen_state) {
}
}
-/*
-static NPY_INLINE double standard_gamma(bitgen_t *bitgen_state, double shape) {
- double b, c;
- double U, V, X, Y;
-
- if (shape == 1.0) {
- return random_standard_exponential(bitgen_state);
- } else if (shape < 1.0) {
- for (;;) {
- U = next_double(bitgen_state);
- V = random_standard_exponential(bitgen_state);
- if (U <= 1.0 - shape) {
- X = pow(U, 1. / shape);
- if (X <= V) {
- return X;
- }
- } else {
- Y = -log((1 - U) / shape);
- X = pow(1.0 - shape + shape * Y, 1. / shape);
- if (X <= (V + Y)) {
- return X;
- }
- }
- }
- } else {
- b = shape - 1. / 3.;
- c = 1. / sqrt(9 * b);
- for (;;) {
- do {
- X = random_gauss(bitgen_state);
- V = 1.0 + c * X;
- } while (V <= 0.0);
-
- V = V * V * V;
- U = next_double(bitgen_state);
- if (U < 1.0 - 0.0331 * (X * X) * (X * X))
- return (b * V);
- if (log(U) < 0.5 * X * X + b * (1. - V + log(V)))
- return (b * V);
- }
- }
-}
-
-static NPY_INLINE float standard_gamma_float(bitgen_t *bitgen_state, float
-shape) { float b, c; float U, V, X, Y;
-
- if (shape == 1.0f) {
- return random_standard_exponential_f(bitgen_state);
- } else if (shape < 1.0f) {
- for (;;) {
- U = next_float(bitgen_state);
- V = random_standard_exponential_f(bitgen_state);
- if (U <= 1.0f - shape) {
- X = powf(U, 1.0f / shape);
- if (X <= V) {
- return X;
- }
- } else {
- Y = -logf((1.0f - U) / shape);
- X = powf(1.0f - shape + shape * Y, 1.0f / shape);
- if (X <= (V + Y)) {
- return X;
- }
- }
- }
- } else {
- b = shape - 1.0f / 3.0f;
- c = 1.0f / sqrtf(9.0f * b);
- for (;;) {
- do {
- X = random_gauss_f(bitgen_state);
- V = 1.0f + c * X;
- } while (V <= 0.0f);
-
- V = V * V * V;
- U = next_float(bitgen_state);
- if (U < 1.0f - 0.0331f * (X * X) * (X * X))
- return (b * V);
- if (logf(U) < 0.5f * X * X + b * (1.0f - V + logf(V)))
- return (b * V);
- }
+void random_standard_normal_fill_f(bitgen_t *bitgen_state, npy_intp cnt, float *out) {
+ npy_intp i;
+ for (i = 0; i < cnt; i++) {
+ out[i] = random_standard_normal_f(bitgen_state);
}
}
-
-double random_standard_gamma(bitgen_t *bitgen_state, double shape) {
- return standard_gamma(bitgen_state, shape);
-}
-
-float random_standard_gamma_f(bitgen_t *bitgen_state, float shape) {
- return standard_gamma_float(bitgen_state, shape);
-}
-*/
-
-static NPY_INLINE double standard_gamma_zig(bitgen_t *bitgen_state,
+double random_standard_gamma(bitgen_t *bitgen_state,
double shape) {
double b, c;
double U, V, X, Y;
if (shape == 1.0) {
- return random_standard_exponential_zig(bitgen_state);
+ return random_standard_exponential(bitgen_state);
} else if (shape == 0.0) {
return 0.0;
} else if (shape < 1.0) {
for (;;) {
U = next_double(bitgen_state);
- V = random_standard_exponential_zig(bitgen_state);
+ V = random_standard_exponential(bitgen_state);
if (U <= 1.0 - shape) {
X = pow(U, 1. / shape);
if (X <= V) {
@@ -372,7 +246,7 @@ static NPY_INLINE double standard_gamma_zig(bitgen_t *bitgen_state,
c = 1. / sqrt(9 * b);
for (;;) {
do {
- X = random_gauss_zig(bitgen_state);
+ X = random_standard_normal(bitgen_state);
V = 1.0 + c * X;
} while (V <= 0.0);
@@ -387,19 +261,19 @@ static NPY_INLINE double standard_gamma_zig(bitgen_t *bitgen_state,
}
}
-static NPY_INLINE float standard_gamma_zig_f(bitgen_t *bitgen_state,
+float random_standard_gamma_f(bitgen_t *bitgen_state,
float shape) {
float b, c;
float U, V, X, Y;
if (shape == 1.0f) {
- return random_standard_exponential_zig_f(bitgen_state);
+ return random_standard_exponential_f(bitgen_state);
} else if (shape == 0.0) {
return 0.0;
} else if (shape < 1.0f) {
for (;;) {
U = next_float(bitgen_state);
- V = random_standard_exponential_zig_f(bitgen_state);
+ V = random_standard_exponential_f(bitgen_state);
if (U <= 1.0f - shape) {
X = powf(U, 1.0f / shape);
if (X <= V) {
@@ -418,7 +292,7 @@ static NPY_INLINE float standard_gamma_zig_f(bitgen_t *bitgen_state,
c = 1.0f / sqrtf(9.0f * b);
for (;;) {
do {
- X = random_gauss_zig_f(bitgen_state);
+ X = random_standard_normal_f(bitgen_state);
V = 1.0f + c * X;
} while (V <= 0.0f);
@@ -433,14 +307,6 @@ static NPY_INLINE float standard_gamma_zig_f(bitgen_t *bitgen_state,
}
}
-double random_standard_gamma_zig(bitgen_t *bitgen_state, double shape) {
- return standard_gamma_zig(bitgen_state, shape);
-}
-
-float random_standard_gamma_zig_f(bitgen_t *bitgen_state, float shape) {
- return standard_gamma_zig_f(bitgen_state, shape);
-}
-
int64_t random_positive_int64(bitgen_t *bitgen_state) {
return next_uint64(bitgen_state) >> 1;
}
@@ -470,10 +336,10 @@ uint64_t random_uint(bitgen_t *bitgen_state) {
* algorithm comes from SPECFUN by Shanjie Zhang and Jianming Jin and their
* book "Computation of Special Functions", 1996, John Wiley & Sons, Inc.
*
- * If loggam(k+1) is being used to compute log(k!) for an integer k, consider
+ * If random_loggam(k+1) is being used to compute log(k!) for an integer k, consider
* using logfactorial(k) instead.
*/
-double loggam(double x) {
+double random_loggam(double x) {
double x0, x2, xp, gl, gl0;
RAND_INT_TYPE k, n;
@@ -513,12 +379,12 @@ double random_normal(bitgen_t *bitgen_state, double loc, double scale) {
}
*/
-double random_normal_zig(bitgen_t *bitgen_state, double loc, double scale) {
- return loc + scale * random_gauss_zig(bitgen_state);
+double random_normal(bitgen_t *bitgen_state, double loc, double scale) {
+ return loc + scale * random_standard_normal(bitgen_state);
}
double random_exponential(bitgen_t *bitgen_state, double scale) {
- return scale * standard_exponential_zig(bitgen_state);
+ return scale * random_standard_exponential(bitgen_state);
}
double random_uniform(bitgen_t *bitgen_state, double lower, double range) {
@@ -526,11 +392,11 @@ double random_uniform(bitgen_t *bitgen_state, double lower, double range) {
}
double random_gamma(bitgen_t *bitgen_state, double shape, double scale) {
- return scale * random_standard_gamma_zig(bitgen_state, shape);
+ return scale * random_standard_gamma(bitgen_state, shape);
}
-float random_gamma_float(bitgen_t *bitgen_state, float shape, float scale) {
- return scale * random_standard_gamma_zig_f(bitgen_state, shape);
+float random_gamma_f(bitgen_t *bitgen_state, float shape, float scale) {
+ return scale * random_standard_gamma_f(bitgen_state, shape);
}
double random_beta(bitgen_t *bitgen_state, double a, double b) {
@@ -562,14 +428,14 @@ double random_beta(bitgen_t *bitgen_state, double a, double b) {
}
}
} else {
- Ga = random_standard_gamma_zig(bitgen_state, a);
- Gb = random_standard_gamma_zig(bitgen_state, b);
+ Ga = random_standard_gamma(bitgen_state, a);
+ Gb = random_standard_gamma(bitgen_state, b);
return Ga / (Ga + Gb);
}
}
double random_chisquare(bitgen_t *bitgen_state, double df) {
- return 2.0 * random_standard_gamma_zig(bitgen_state, df / 2.0);
+ return 2.0 * random_standard_gamma(bitgen_state, df / 2.0);
}
double random_f(bitgen_t *bitgen_state, double dfnum, double dfden) {
@@ -578,22 +444,22 @@ double random_f(bitgen_t *bitgen_state, double dfnum, double dfden) {
}
double random_standard_cauchy(bitgen_t *bitgen_state) {
- return random_gauss_zig(bitgen_state) / random_gauss_zig(bitgen_state);
+ return random_standard_normal(bitgen_state) / random_standard_normal(bitgen_state);
}
double random_pareto(bitgen_t *bitgen_state, double a) {
- return exp(standard_exponential_zig(bitgen_state) / a) - 1;
+ return exp(random_standard_exponential(bitgen_state) / a) - 1;
}
double random_weibull(bitgen_t *bitgen_state, double a) {
if (a == 0.0) {
return 0.0;
}
- return pow(standard_exponential_zig(bitgen_state), 1. / a);
+ return pow(random_standard_exponential(bitgen_state), 1. / a);
}
double random_power(bitgen_t *bitgen_state, double a) {
- return pow(1 - exp(-standard_exponential_zig(bitgen_state)), 1. / a);
+ return pow(1 - exp(-random_standard_exponential(bitgen_state)), 1. / a);
}
double random_laplace(bitgen_t *bitgen_state, double loc, double scale) {
@@ -634,7 +500,7 @@ double random_logistic(bitgen_t *bitgen_state, double loc, double scale) {
}
double random_lognormal(bitgen_t *bitgen_state, double mean, double sigma) {
- return exp(random_normal_zig(bitgen_state, mean, sigma));
+ return exp(random_normal(bitgen_state, mean, sigma));
}
double random_rayleigh(bitgen_t *bitgen_state, double mode) {
@@ -644,8 +510,8 @@ double random_rayleigh(bitgen_t *bitgen_state, double mode) {
double random_standard_t(bitgen_t *bitgen_state, double df) {
double num, denom;
- num = random_gauss_zig(bitgen_state);
- denom = random_standard_gamma_zig(bitgen_state, df / 2);
+ num = random_standard_normal(bitgen_state);
+ denom = random_standard_gamma(bitgen_state, df / 2);
return sqrt(df / 2) * num / sqrt(denom);
}
@@ -699,7 +565,7 @@ static RAND_INT_TYPE random_poisson_ptrs(bitgen_t *bitgen_state, double lam) {
/* log(V) == log(0.0) ok here */
/* if U==0.0 so that us==0.0, log is ok since always returns */
if ((log(V) + log(invalpha) - log(a / (us * us) + b)) <=
- (-lam + k * loglam - loggam(k + 1))) {
+ (-lam + k * loglam - random_loggam(k + 1))) {
return k;
}
}
@@ -934,7 +800,7 @@ double random_noncentral_chisquare(bitgen_t *bitgen_state, double df,
}
if (1 < df) {
const double Chi2 = random_chisquare(bitgen_state, df - 1);
- const double n = random_gauss_zig(bitgen_state) + sqrt(nonc);
+ const double n = random_standard_normal(bitgen_state) + sqrt(nonc);
return Chi2 + n * n;
} else {
const RAND_INT_TYPE i = random_poisson(bitgen_state, nonc / 2.0);
@@ -953,7 +819,7 @@ double random_wald(bitgen_t *bitgen_state, double mean, double scale) {
double mu_2l;
mu_2l = mean / (2 * scale);
- Y = random_gauss_zig(bitgen_state);
+ Y = random_standard_normal(bitgen_state);
Y = mean * Y * Y;
X = mean + mu_2l * (Y - sqrt(4 * scale * Y + Y * Y));
U = next_double(bitgen_state);
@@ -1092,8 +958,8 @@ RAND_INT_TYPE random_zipf(bitgen_t *bitgen_state, double a) {
while (1) {
double T, U, V, X;
- U = 1.0 - random_double(bitgen_state);
- V = random_double(bitgen_state);
+ U = 1.0 - next_double(bitgen_state);
+ V = next_double(bitgen_state);
X = floor(pow(U, -1.0 / am1));
/*
* The real result may be above what can be represented in a signed
@@ -1297,10 +1163,7 @@ static NPY_INLINE uint64_t bounded_lemire_uint64(bitgen_t *bitgen_state,
if (leftover < rng_excl) {
/* `rng_excl` is a simple upper bound for `threshold`. */
-
- const uint64_t threshold = -rng_excl % rng_excl;
- /* Same as: threshold=((uint64_t)(0x10000000000000000ULLL - rng_excl)) %
- * rng_excl; */
+ const uint64_t threshold = (UINT64_MAX - rng) % rng_excl;
while (leftover < threshold) {
m = ((__uint128_t)next_uint64(bitgen_state)) * rng_excl;
@@ -1323,10 +1186,7 @@ static NPY_INLINE uint64_t bounded_lemire_uint64(bitgen_t *bitgen_state,
if (leftover < rng_excl) {
/* `rng_excl` is a simple upper bound for `threshold`. */
-
- const uint64_t threshold = -rng_excl % rng_excl;
- /* Same as:threshold=((uint64_t)(0x10000000000000000ULLL - rng_excl)) %
- * rng_excl; */
+ const uint64_t threshold = (UINT64_MAX - rng) % rng_excl;
while (leftover < threshold) {
x = next_uint64(bitgen_state);
@@ -1387,8 +1247,7 @@ static NPY_INLINE uint32_t buffered_bounded_lemire_uint32(
if (leftover < rng_excl) {
/* `rng_excl` is a simple upper bound for `threshold`. */
- const uint32_t threshold = -rng_excl % rng_excl;
- /* Same as: threshold=((uint64_t)(0x100000000ULL - rng_excl)) % rng_excl; */
+ const uint32_t threshold = (UINT32_MAX - rng) % rng_excl;
while (leftover < threshold) {
m = ((uint64_t)next_uint32(bitgen_state)) * rng_excl;
@@ -1422,8 +1281,7 @@ static NPY_INLINE uint16_t buffered_bounded_lemire_uint16(
if (leftover < rng_excl) {
/* `rng_excl` is a simple upper bound for `threshold`. */
- const uint16_t threshold = -rng_excl % rng_excl;
- /* Same as: threshold=((uint32_t)(0x10000ULL - rng_excl)) % rng_excl; */
+ const uint16_t threshold = (UINT16_MAX - rng) % rng_excl;
while (leftover < threshold) {
m = ((uint32_t)buffered_uint16(bitgen_state, bcnt, buf)) * rng_excl;
@@ -1458,8 +1316,7 @@ static NPY_INLINE uint8_t buffered_bounded_lemire_uint8(bitgen_t *bitgen_state,
if (leftover < rng_excl) {
/* `rng_excl` is a simple upper bound for `threshold`. */
- const uint8_t threshold = -rng_excl % rng_excl;
- /* Same as: threshold=((uint16_t)(0x100ULL - rng_excl)) % rng_excl; */
+ const uint8_t threshold = (UINT8_MAX - rng) % rng_excl;
while (leftover < threshold) {
m = ((uint16_t)buffered_uint8(bitgen_state, bcnt, buf)) * rng_excl;
diff --git a/numpy/random/src/distributions/random_hypergeometric.c b/numpy/random/src/distributions/random_hypergeometric.c
index 94dc6380f..0da49bd62 100644
--- a/numpy/random/src/distributions/random_hypergeometric.c
+++ b/numpy/random/src/distributions/random_hypergeometric.c
@@ -1,4 +1,4 @@
-#include "distributions.h"
+#include "numpy/random/distributions.h"
#include "logfactorial.h"
#include <stdint.h>
@@ -188,8 +188,8 @@ static int64_t hypergeometric_hrua(bitgen_t *bitgen_state,
while (1) {
double U, V, X, T;
double gp;
- U = random_double(bitgen_state);
- V = random_double(bitgen_state); // "U star" in Stadlober (1989)
+ U = next_double(bitgen_state);
+ V = next_double(bitgen_state); // "U star" in Stadlober (1989)
X = a + h*(V - 0.5) / U;
// fast rejection:
diff --git a/numpy/random/src/distributions/random_mvhg_count.c b/numpy/random/src/distributions/random_mvhg_count.c
new file mode 100644
index 000000000..1d4ed978e
--- /dev/null
+++ b/numpy/random/src/distributions/random_mvhg_count.c
@@ -0,0 +1,131 @@
+#include "numpy/random/distributions.h"
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdbool.h>
+
+
+/*
+ * random_multivariate_hypergeometric_count
+ *
+ * Draw variates from the multivariate hypergeometric distribution--
+ * the "count" algorithm.
+ *
+ * Parameters
+ * ----------
+ * bitgen_t *bitgen_state
+ * Pointer to a `bitgen_t` instance.
+ * int64_t total
+ * The sum of the values in the array `colors`. (This is redundant
+ * information, but we know the caller has already computed it, so
+ * we might as well use it.)
+ * size_t num_colors
+ * The length of the `colors` array.
+ * int64_t *colors
+ * The array of colors (i.e. the number of each type in the collection
+ * from which the random variate is drawn).
+ * int64_t nsample
+ * The number of objects drawn without replacement for each variate.
+ * `nsample` must not exceed sum(colors). This condition is not checked;
+ * it is assumed that the caller has already validated the value.
+ * size_t num_variates
+ * The number of variates to be produced and put in the array
+ * pointed to by `variates`. One variate is a vector of length
+ * `num_colors`, so the array pointed to by `variates` must have length
+ * `num_variates * num_colors`.
+ * int64_t *variates
+ * The array that will hold the result. It must have length
+ * `num_variates * num_colors`.
+ * The array is not initialized in the function; it is expected that the
+ * array has been initialized with zeros when the function is called.
+ *
+ * Notes
+ * -----
+ * The "count" algorithm for drawing one variate is roughly equivalent to the
+ * following numpy code:
+ *
+ * choices = np.repeat(np.arange(len(colors)), colors)
+ * selection = np.random.choice(choices, nsample, replace=False)
+ * variate = np.bincount(selection, minlength=len(colors))
+ *
+ * This function uses a temporary array with length sum(colors).
+ *
+ * Assumptions on the arguments (not checked in the function):
+ * * colors[k] >= 0 for k in range(num_colors)
+ * * total = sum(colors)
+ * * 0 <= nsample <= total
+ * * the product total * sizeof(size_t) does not exceed SIZE_MAX
+ * * the product num_variates * num_colors does not overflow
+ */
+
+int random_multivariate_hypergeometric_count(bitgen_t *bitgen_state,
+ int64_t total,
+ size_t num_colors, int64_t *colors,
+ int64_t nsample,
+ size_t num_variates, int64_t *variates)
+{
+ size_t *choices;
+ bool more_than_half;
+
+ if ((total == 0) || (nsample == 0) || (num_variates == 0)) {
+ // Nothing to do.
+ return 0;
+ }
+
+ choices = malloc(total * (sizeof *choices));
+ if (choices == NULL) {
+ return -1;
+ }
+
+ /*
+ * If colors contains, for example, [3 2 5], then choices
+ * will contain [0 0 0 1 1 2 2 2 2 2].
+ */
+ for (size_t i = 0, k = 0; i < num_colors; ++i) {
+ for (int64_t j = 0; j < colors[i]; ++j) {
+ choices[k] = i;
+ ++k;
+ }
+ }
+
+ more_than_half = nsample > (total / 2);
+ if (more_than_half) {
+ nsample = total - nsample;
+ }
+
+ for (size_t i = 0; i < num_variates * num_colors; i += num_colors) {
+ /*
+ * Fisher-Yates shuffle, but only loop through the first
+ * `nsample` entries of `choices`. After the loop,
+ * choices[:nsample] contains a random sample from
+ * the full array.
+ */
+ for (size_t j = 0; j < (size_t) nsample; ++j) {
+ size_t tmp, k;
+ // Note: nsample is not greater than total, so there is no danger
+ // of integer underflow in `(size_t) total - j - 1`.
+ k = j + (size_t) random_interval(bitgen_state,
+ (size_t) total - j - 1);
+ tmp = choices[k];
+ choices[k] = choices[j];
+ choices[j] = tmp;
+ }
+ /*
+ * Count the number of occurrences of each value in choices[:nsample].
+ * The result, stored in variates[i:i+num_colors], is the sample from
+ * the multivariate hypergeometric distribution.
+ */
+ for (size_t j = 0; j < (size_t) nsample; ++j) {
+ variates[i + choices[j]] += 1;
+ }
+
+ if (more_than_half) {
+ for (size_t k = 0; k < num_colors; ++k) {
+ variates[i + k] = colors[k] - variates[i + k];
+ }
+ }
+ }
+
+ free(choices);
+
+ return 0;
+}
diff --git a/numpy/random/src/distributions/random_mvhg_marginals.c b/numpy/random/src/distributions/random_mvhg_marginals.c
new file mode 100644
index 000000000..689a85671
--- /dev/null
+++ b/numpy/random/src/distributions/random_mvhg_marginals.c
@@ -0,0 +1,138 @@
+#include "numpy/random/distributions.h"
+#include <stdint.h>
+#include <stddef.h>
+#include <stdbool.h>
+#include <math.h>
+
+#include "logfactorial.h"
+
+
+/*
+ * random_multivariate_hypergeometric_marginals
+ *
+ * Draw samples from the multivariate hypergeometric distribution--
+ * the "marginals" algorithm.
+ *
+ * This version generates the sample by iteratively calling
+ * hypergeometric() (the univariate hypergeometric distribution).
+ *
+ * Parameters
+ * ----------
+ * bitgen_t *bitgen_state
+ * Pointer to a `bitgen_t` instance.
+ * int64_t total
+ * The sum of the values in the array `colors`. (This is redundant
+ * information, but we know the caller has already computed it, so
+ * we might as well use it.)
+ * size_t num_colors
+ * The length of the `colors` array. The function assumes
+ * num_colors > 0.
+ * int64_t *colors
+ * The array of colors (i.e. the number of each type in the collection
+ * from which the random variate is drawn).
+ * int64_t nsample
+ * The number of objects drawn without replacement for each variate.
+ * `nsample` must not exceed sum(colors). This condition is not checked;
+ * it is assumed that the caller has already validated the value.
+ * size_t num_variates
+ * The number of variates to be produced and put in the array
+ * pointed to by `variates`. One variate is a vector of length
+ * `num_colors`, so the array pointed to by `variates` must have length
+ * `num_variates * num_colors`.
+ * int64_t *variates
+ * The array that will hold the result. It must have length
+ * `num_variates * num_colors`.
+ * The array is not initialized in the function; it is expected that the
+ * array has been initialized with zeros when the function is called.
+ *
+ * Notes
+ * -----
+ * Here's an example that demonstrates the idea of this algorithm.
+ *
+ * Suppose the urn contains red, green, blue and yellow marbles.
+ * Let nred be the number of red marbles, and define the quantities for
+ * the other colors similarly. The total number of marbles is
+ *
+ * total = nred + ngreen + nblue + nyellow.
+ *
+ * To generate a sample using random_hypergeometric:
+ *
+ * red_sample = hypergeometric(ngood=nred, nbad=total - nred,
+ * nsample=nsample)
+ *
+ * This gives us the number of red marbles in the sample. The number of
+ * marbles in the sample that are *not* red is nsample - red_sample.
+ * To figure out the distribution of those marbles, we again use
+ * random_hypergeometric:
+ *
+ * green_sample = hypergeometric(ngood=ngreen,
+ * nbad=total - nred - ngreen,
+ * nsample=nsample - red_sample)
+ *
+ * Similarly,
+ *
+ * blue_sample = hypergeometric(
+ * ngood=nblue,
+ * nbad=total - nred - ngreen - nblue,
+ * nsample=nsample - red_sample - green_sample)
+ *
+ * Finally,
+ *
+ * yellow_sample = nsample - (red_sample + green_sample + blue_sample).
+ *
+ * The above sequence of steps is implemented as a loop for an arbitrary
+ * number of colors in the innermost loop in the code below. `remaining`
+ * is the value passed to `nbad`; it is `total - colors[0]` in the first
+ * call to random_hypergeometric(), and then decreases by `colors[j]` in
+ * each iteration. `num_to_sample` is the `nsample` argument. It
+ * starts at this function's `nsample` input, and is decreased by the
+ * result of the call to random_hypergeometric() in each iteration.
+ *
+ * Assumptions on the arguments (not checked in the function):
+ * * colors[k] >= 0 for k in range(num_colors)
+ * * total = sum(colors)
+ * * 0 <= nsample <= total
+ * * the product num_variates * num_colors does not overflow
+ */
+
+void random_multivariate_hypergeometric_marginals(bitgen_t *bitgen_state,
+ int64_t total,
+ size_t num_colors, int64_t *colors,
+ int64_t nsample,
+ size_t num_variates, int64_t *variates)
+{
+ bool more_than_half;
+
+ if ((total == 0) || (nsample == 0) || (num_variates == 0)) {
+ // Nothing to do.
+ return;
+ }
+
+ more_than_half = nsample > (total / 2);
+ if (more_than_half) {
+ nsample = total - nsample;
+ }
+
+ for (size_t i = 0; i < num_variates * num_colors; i += num_colors) {
+ int64_t num_to_sample = nsample;
+ int64_t remaining = total;
+ for (size_t j = 0; (num_to_sample > 0) && (j + 1 < num_colors); ++j) {
+ int64_t r;
+ remaining -= colors[j];
+ r = random_hypergeometric(bitgen_state,
+ colors[j], remaining, num_to_sample);
+ variates[i + j] = r;
+ num_to_sample -= r;
+ }
+
+ if (num_to_sample > 0) {
+ variates[i + num_colors - 1] = num_to_sample;
+ }
+
+ if (more_than_half) {
+ for (size_t k = 0; k < num_colors; ++k) {
+ variates[i + k] = colors[k] - variates[i + k];
+ }
+ }
+ }
+}
diff --git a/numpy/random/src/legacy/legacy-distributions.c b/numpy/random/src/legacy/legacy-distributions.c
index 684b3d762..fd067fe8d 100644
--- a/numpy/random/src/legacy/legacy-distributions.c
+++ b/numpy/random/src/legacy/legacy-distributions.c
@@ -1,4 +1,4 @@
-#include "legacy-distributions.h"
+#include "include/legacy-distributions.h"
static NPY_INLINE double legacy_double(aug_bitgen_t *aug_state) {
@@ -294,8 +294,8 @@ static RAND_INT_TYPE random_hypergeometric_hrua(bitgen_t *bitgen_state,
d7 = sqrt((double)(popsize - m) * sample * d4 * d5 / (popsize - 1) + 0.5);
d8 = D1 * d7 + D2;
d9 = (RAND_INT_TYPE)floor((double)(m + 1) * (mingoodbad + 1) / (popsize + 2));
- d10 = (loggam(d9 + 1) + loggam(mingoodbad - d9 + 1) + loggam(m - d9 + 1) +
- loggam(maxgoodbad - m + d9 + 1));
+ d10 = (random_loggam(d9 + 1) + random_loggam(mingoodbad - d9 + 1) +
+ random_loggam(m - d9 + 1) + random_loggam(maxgoodbad - m + d9 + 1));
d11 = MIN(MIN(m, mingoodbad) + 1.0, floor(d6 + 16 * d7));
/* 16 for 16-decimal-digit precision in D1 and D2 */
@@ -309,8 +309,8 @@ static RAND_INT_TYPE random_hypergeometric_hrua(bitgen_t *bitgen_state,
continue;
Z = (RAND_INT_TYPE)floor(W);
- T = d10 - (loggam(Z + 1) + loggam(mingoodbad - Z + 1) + loggam(m - Z + 1) +
- loggam(maxgoodbad - m + Z + 1));
+ T = d10 - (random_loggam(Z + 1) + random_loggam(mingoodbad - Z + 1) +
+ random_loggam(m - Z + 1) + random_loggam(maxgoodbad - m + Z + 1));
/* fast acceptance: */
if ((X * (4.0 - X) - 3.0) <= T)
diff --git a/numpy/random/src/philox/philox-benchmark.c b/numpy/random/src/philox/philox-benchmark.c
index df5814d5f..9856a9b8e 100644
--- a/numpy/random/src/philox/philox-benchmark.c
+++ b/numpy/random/src/philox/philox-benchmark.c
@@ -1,5 +1,5 @@
/*
- * Simple benchamrk command
+ * Simple benchmark command
*
* cl philox-benchmark.c /Ox
*
diff --git a/numpy/random/tests/test_direct.py b/numpy/random/tests/test_direct.py
index 0f57c4bd4..dad12c8a8 100644
--- a/numpy/random/tests/test_direct.py
+++ b/numpy/random/tests/test_direct.py
@@ -1,5 +1,6 @@
import os
from os.path import join
+import sys
import numpy as np
from numpy.testing import (assert_equal, assert_allclose, assert_array_equal,
@@ -10,7 +11,7 @@ from numpy.random import (
Generator, MT19937, PCG64, Philox, RandomState, SeedSequence, SFC64,
default_rng
)
-from numpy.random.common import interface
+from numpy.random._common import interface
try:
import cffi # noqa: F401
@@ -26,6 +27,12 @@ try:
except ImportError:
MISSING_CTYPES = False
+if sys.flags.optimize > 1:
+ # no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1
+ # cffi cannot succeed
+ MISSING_CFFI = True
+
+
pwd = os.path.dirname(os.path.abspath(__file__))
@@ -138,7 +145,7 @@ def test_seedsequence():
assert len(dummy.spawn(10)) == 10
-class Base(object):
+class Base:
dtype = np.uint64
data2 = data1 = {}
@@ -403,7 +410,7 @@ class TestSFC64(Base):
cls.invalid_init_values = [(-1,)]
-class TestDefaultRNG(object):
+class TestDefaultRNG:
def test_seed(self):
for args in [(), (None,), (1234,), ([1234, 5678],)]:
rg = default_rng(*args)
diff --git a/numpy/random/tests/test_extending.py b/numpy/random/tests/test_extending.py
new file mode 100644
index 000000000..f7efafba9
--- /dev/null
+++ b/numpy/random/tests/test_extending.py
@@ -0,0 +1,85 @@
+import os
+import pytest
+import shutil
+import subprocess
+import sys
+import warnings
+import numpy as np
+
+try:
+ import cffi
+except ImportError:
+ cffi = None
+
+if sys.flags.optimize > 1:
+ # no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1
+ # cffi cannot succeed
+ cffi = None
+
+try:
+ with warnings.catch_warnings(record=True) as w:
+ # numba issue gh-4733
+ warnings.filterwarnings('always', '', DeprecationWarning)
+ import numba
+except ImportError:
+ numba = None
+
+try:
+ import cython
+ from Cython.Compiler.Version import version as cython_version
+except ImportError:
+ cython = None
+else:
+ from distutils.version import LooseVersion
+ # Cython 0.29.14 is required for Python 3.8 and there are
+ # other fixes in the 0.29 series that are needed even for earlier
+ # Python versions.
+ # Note: keep in sync with the one in pyproject.toml
+ required_version = LooseVersion('0.29.14')
+ if LooseVersion(cython_version) < required_version:
+ # too old or wrong cython, skip the test
+ cython = None
+
+@pytest.mark.skipif(cython is None, reason="requires cython")
+@pytest.mark.slow
+def test_cython(tmp_path):
+ srcdir = os.path.join(os.path.dirname(__file__), '..')
+ shutil.copytree(srcdir, tmp_path / 'random')
+ # build the examples and "install" them into a temporary directory
+ env = os.environ.copy()
+ subprocess.check_call([sys.executable, 'setup.py', 'build', 'install',
+ '--prefix', str(tmp_path / 'installdir'),
+ '--single-version-externally-managed',
+ '--record', str(tmp_path/ 'tmp_install_log.txt'),
+ ],
+ cwd=str(tmp_path / 'random' / '_examples' / 'cython'),
+ env=env)
+ # get the path to the so's
+ so1 = so2 = None
+ with open(tmp_path /'tmp_install_log.txt') as fid:
+ for line in fid:
+ if 'extending.' in line:
+ so1 = line.strip()
+ if 'extending_distributions' in line:
+ so2 = line.strip()
+ assert so1 is not None
+ assert so2 is not None
+ # import the so's without adding the directory to sys.path
+ from importlib.machinery import ExtensionFileLoader
+ extending = ExtensionFileLoader('extending', so1).load_module()
+ extending_distributions = ExtensionFileLoader('extending_distributions', so2).load_module()
+
+ # actually test the cython c-extension
+ from numpy.random import PCG64
+ values = extending_distributions.uniforms_ex(PCG64(0), 10, 'd')
+ assert values.shape == (10,)
+ assert values.dtype == np.float64
+
+@pytest.mark.skipif(numba is None or cffi is None,
+ reason="requires numba and cffi")
+def test_numba():
+ from numpy.random._examples.numba import extending # noqa: F401
+
+@pytest.mark.skipif(cffi is None, reason="requires cffi")
+def test_cffi():
+ from numpy.random._examples.cffi import extending # noqa: F401
diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py
index 391c33c1a..ce90ccdc5 100644
--- a/numpy/random/tests/test_generator_mt19937.py
+++ b/numpy/random/tests/test_generator_mt19937.py
@@ -3,8 +3,9 @@ import sys
import pytest
import numpy as np
+from numpy.linalg import LinAlgError
from numpy.testing import (
- assert_, assert_raises, assert_equal,
+ assert_, assert_raises, assert_equal, assert_allclose,
assert_warns, assert_no_warnings, assert_array_equal,
assert_array_almost_equal, suppress_warnings)
@@ -18,7 +19,7 @@ def endpoint(request):
return request.param
-class TestSeed(object):
+class TestSeed:
def test_scalar(self):
s = Generator(MT19937(0))
assert_equal(s.integers(1000), 479)
@@ -54,7 +55,7 @@ class TestSeed(object):
assert_raises(ValueError, Generator, MT19937)
-class TestBinomial(object):
+class TestBinomial:
def test_n_zero(self):
# Tests the corner case of n == 0 for the binomial distribution.
# binomial(0, p) should be zero for any p in [0, 1].
@@ -69,7 +70,7 @@ class TestBinomial(object):
assert_raises(ValueError, random.binomial, 1, np.nan)
-class TestMultinomial(object):
+class TestMultinomial:
def test_basic(self):
random.multinomial(100, [0.2, 0.8])
@@ -114,8 +115,148 @@ class TestMultinomial(object):
contig = random.multinomial(100, pvals=np.ascontiguousarray(pvals))
assert_array_equal(non_contig, contig)
+ def test_multidimensional_pvals(self):
+ assert_raises(ValueError, random.multinomial, 10, [[0, 1]])
+ assert_raises(ValueError, random.multinomial, 10, [[0], [1]])
+ assert_raises(ValueError, random.multinomial, 10, [[[0], [1]], [[1], [0]]])
+ assert_raises(ValueError, random.multinomial, 10, np.array([[0, 1], [1, 0]]))
-class TestSetState(object):
+
+class TestMultivariateHypergeometric:
+
+ def setup(self):
+ self.seed = 8675309
+
+ def test_argument_validation(self):
+ # Error cases...
+
+ # `colors` must be a 1-d sequence
+ assert_raises(ValueError, random.multivariate_hypergeometric,
+ 10, 4)
+
+ # Negative nsample
+ assert_raises(ValueError, random.multivariate_hypergeometric,
+ [2, 3, 4], -1)
+
+ # Negative color
+ assert_raises(ValueError, random.multivariate_hypergeometric,
+ [-1, 2, 3], 2)
+
+ # nsample exceeds sum(colors)
+ assert_raises(ValueError, random.multivariate_hypergeometric,
+ [2, 3, 4], 10)
+
+ # nsample exceeds sum(colors) (edge case of empty colors)
+ assert_raises(ValueError, random.multivariate_hypergeometric,
+ [], 1)
+
+ # Validation errors associated with very large values in colors.
+ assert_raises(ValueError, random.multivariate_hypergeometric,
+ [999999999, 101], 5, 1, 'marginals')
+
+ int64_info = np.iinfo(np.int64)
+ max_int64 = int64_info.max
+ max_int64_index = max_int64 // int64_info.dtype.itemsize
+ assert_raises(ValueError, random.multivariate_hypergeometric,
+ [max_int64_index - 100, 101], 5, 1, 'count')
+
+ @pytest.mark.parametrize('method', ['count', 'marginals'])
+ def test_edge_cases(self, method):
+ # Set the seed, but in fact, all the results in this test are
+ # deterministic, so we don't really need this.
+ random = Generator(MT19937(self.seed))
+
+ x = random.multivariate_hypergeometric([0, 0, 0], 0, method=method)
+ assert_array_equal(x, [0, 0, 0])
+
+ x = random.multivariate_hypergeometric([], 0, method=method)
+ assert_array_equal(x, [])
+
+ x = random.multivariate_hypergeometric([], 0, size=1, method=method)
+ assert_array_equal(x, np.empty((1, 0), dtype=np.int64))
+
+ x = random.multivariate_hypergeometric([1, 2, 3], 0, method=method)
+ assert_array_equal(x, [0, 0, 0])
+
+ x = random.multivariate_hypergeometric([9, 0, 0], 3, method=method)
+ assert_array_equal(x, [3, 0, 0])
+
+ colors = [1, 1, 0, 1, 1]
+ x = random.multivariate_hypergeometric(colors, sum(colors),
+ method=method)
+ assert_array_equal(x, colors)
+
+ x = random.multivariate_hypergeometric([3, 4, 5], 12, size=3,
+ method=method)
+ assert_array_equal(x, [[3, 4, 5]]*3)
+
+ # Cases for nsample:
+ # nsample < 10
+ # 10 <= nsample < colors.sum()/2
+ # colors.sum()/2 < nsample < colors.sum() - 10
+ # colors.sum() - 10 < nsample < colors.sum()
+ @pytest.mark.parametrize('nsample', [8, 25, 45, 55])
+ @pytest.mark.parametrize('method', ['count', 'marginals'])
+ @pytest.mark.parametrize('size', [5, (2, 3), 150000])
+ def test_typical_cases(self, nsample, method, size):
+ random = Generator(MT19937(self.seed))
+
+ colors = np.array([10, 5, 20, 25])
+ sample = random.multivariate_hypergeometric(colors, nsample, size,
+ method=method)
+ if isinstance(size, int):
+ expected_shape = (size,) + colors.shape
+ else:
+ expected_shape = size + colors.shape
+ assert_equal(sample.shape, expected_shape)
+ assert_((sample >= 0).all())
+ assert_((sample <= colors).all())
+ assert_array_equal(sample.sum(axis=-1),
+ np.full(size, fill_value=nsample, dtype=int))
+ if isinstance(size, int) and size >= 100000:
+ # This sample is large enough to compare its mean to
+ # the expected values.
+ assert_allclose(sample.mean(axis=0),
+ nsample * colors / colors.sum(),
+ rtol=1e-3, atol=0.005)
+
+ def test_repeatability1(self):
+ random = Generator(MT19937(self.seed))
+ sample = random.multivariate_hypergeometric([3, 4, 5], 5, size=5,
+ method='count')
+ expected = np.array([[2, 1, 2],
+ [2, 1, 2],
+ [1, 1, 3],
+ [2, 0, 3],
+ [2, 1, 2]])
+ assert_array_equal(sample, expected)
+
+ def test_repeatability2(self):
+ random = Generator(MT19937(self.seed))
+ sample = random.multivariate_hypergeometric([20, 30, 50], 50,
+ size=5,
+ method='marginals')
+ expected = np.array([[ 9, 17, 24],
+ [ 7, 13, 30],
+ [ 9, 15, 26],
+ [ 9, 17, 24],
+ [12, 14, 24]])
+ assert_array_equal(sample, expected)
+
+ def test_repeatability3(self):
+ random = Generator(MT19937(self.seed))
+ sample = random.multivariate_hypergeometric([20, 30, 50], 12,
+ size=5,
+ method='marginals')
+ expected = np.array([[2, 3, 7],
+ [5, 3, 4],
+ [2, 5, 5],
+ [5, 3, 4],
+ [1, 5, 6]])
+ assert_array_equal(sample, expected)
+
+
+class TestSetState:
def setup(self):
self.seed = 1234567890
self.rg = Generator(MT19937(self.seed))
@@ -149,7 +290,7 @@ class TestSetState(object):
self.rg.negative_binomial(0.5, 0.5)
-class TestIntegers(object):
+class TestIntegers:
rfunc = random.integers
# valid integer/boolean types
@@ -329,11 +470,11 @@ class TestIntegers(object):
'int16': '39624ead49ad67e37545744024d2648b',
'int32': '5c4810373f979336c6c0c999996e47a1',
'int64': 'ab126c15edff26f55c50d2b7e37391ac',
- 'int8': 'd1746364b48a020dab9ef0568e6c0cd2',
+ 'int8': 'ba71ccaffeeeb9eeb1860f8075020b9c',
'uint16': '39624ead49ad67e37545744024d2648b',
'uint32': '5c4810373f979336c6c0c999996e47a1',
'uint64': 'ab126c15edff26f55c50d2b7e37391ac',
- 'uint8': 'd1746364b48a020dab9ef0568e6c0cd2'}
+ 'uint8': 'ba71ccaffeeeb9eeb1860f8075020b9c'}
for dt in self.itype[1:]:
random = Generator(MT19937(1234))
@@ -358,9 +499,8 @@ class TestIntegers(object):
def test_repeatability_broadcasting(self, endpoint):
for dt in self.itype:
- lbnd = 0 if dt in (np.bool, bool, np.bool_) else np.iinfo(dt).min
- ubnd = 2 if dt in (
- np.bool, bool, np.bool_) else np.iinfo(dt).max + 1
+ lbnd = 0 if dt in (bool, np.bool_) else np.iinfo(dt).min
+ ubnd = 2 if dt in (bool, np.bool_) else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
# view as little endian for hash
@@ -399,8 +539,8 @@ class TestIntegers(object):
assert_raises(ValueError, random.integers, low_a, high_a,
endpoint=endpoint, dtype=dtype)
- low_o = np.array([[low]*10], dtype=np.object)
- high_o = np.array([high] * 10, dtype=np.object)
+ low_o = np.array([[low]*10], dtype=object)
+ high_o = np.array([high] * 10, dtype=object)
assert_raises(ValueError, random.integers, low_o, high,
endpoint=endpoint, dtype=dtype)
assert_raises(ValueError, random.integers, low, high_o,
@@ -442,7 +582,7 @@ class TestIntegers(object):
sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)
assert_equal(sample.dtype, dt)
- for dt in (bool, int, np.long):
+ for dt in (bool, int, np.compat.long):
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
@@ -484,8 +624,26 @@ class TestIntegers(object):
with pytest.raises(ValueError):
random.integers(0, 200, size=10, dtype=other_byteord_dt)
-
-class TestRandomDist(object):
+ # chi2max is the maximum acceptable chi-squared value.
+ @pytest.mark.slow
+ @pytest.mark.parametrize('sample_size,high,dtype,chi2max',
+ [(5000000, 5, np.int8, 125.0), # p-value ~4.6e-25
+ (5000000, 7, np.uint8, 150.0), # p-value ~7.7e-30
+ (10000000, 2500, np.int16, 3300.0), # p-value ~3.0e-25
+ (50000000, 5000, np.uint16, 6500.0), # p-value ~3.5e-25
+ ])
+ def test_integers_small_dtype_chisquared(self, sample_size, high,
+ dtype, chi2max):
+ # Regression test for gh-14774.
+ samples = random.integers(high, size=sample_size, dtype=dtype)
+
+ values, counts = np.unique(samples, return_counts=True)
+ expected = sample_size / high
+ chi2 = ((counts - expected)**2 / expected).sum()
+ assert chi2 < chi2max
+
+
+class TestRandomDist:
# Make sure the random distribution returns the correct value for a
# given seed
@@ -1044,12 +1202,13 @@ class TestRandomDist(object):
[5, 5, 3, 1, 2, 4]]])
assert_array_equal(actual, desired)
- def test_multivariate_normal(self):
+ @pytest.mark.parametrize("method", ["svd", "eigh", "cholesky"])
+ def test_multivariate_normal(self, method):
random = Generator(MT19937(self.seed))
mean = (.123456789, 10)
cov = [[1, 0], [0, 1]]
size = (3, 2)
- actual = random.multivariate_normal(mean, cov, size)
+ actual = random.multivariate_normal(mean, cov, size, method=method)
desired = np.array([[[-1.747478062846581, 11.25613495182354 ],
[-0.9967333370066214, 10.342002097029821 ]],
[[ 0.7850019631242964, 11.181113712443013 ],
@@ -1060,15 +1219,24 @@ class TestRandomDist(object):
assert_array_almost_equal(actual, desired, decimal=15)
# Check for default size, was raising deprecation warning
- actual = random.multivariate_normal(mean, cov)
+ actual = random.multivariate_normal(mean, cov, method=method)
desired = np.array([0.233278563284287, 9.424140804347195])
assert_array_almost_equal(actual, desired, decimal=15)
+ # Check that non symmetric covariance input raises exception when
+ # check_valid='raises' if using default svd method.
+ mean = [0, 0]
+ cov = [[1, 2], [1, 2]]
+ assert_raises(ValueError, random.multivariate_normal, mean, cov,
+ check_valid='raise')
# Check that non positive-semidefinite covariance warns with
# RuntimeWarning
- mean = [0, 0]
cov = [[1, 2], [2, 1]]
assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov)
+ assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov,
+ method='eigh')
+ assert_raises(LinAlgError, random.multivariate_normal, mean, cov,
+ method='cholesky')
# and that it doesn't warn with RuntimeWarning check_valid='ignore'
assert_no_warnings(random.multivariate_normal, mean, cov,
@@ -1077,10 +1245,23 @@ class TestRandomDist(object):
# and that it raises with RuntimeWarning check_valid='raises'
assert_raises(ValueError, random.multivariate_normal, mean, cov,
check_valid='raise')
+ assert_raises(ValueError, random.multivariate_normal, mean, cov,
+ check_valid='raise', method='eigh')
+
+ # check degenerate samples from singular covariance matrix
+ cov = [[1, 1], [1, 1]]
+ if method in ('svd', 'eigh'):
+ samples = random.multivariate_normal(mean, cov, size=(3, 2),
+ method=method)
+ assert_array_almost_equal(samples[..., 0], samples[..., 1],
+ decimal=6)
+ else:
+ assert_raises(LinAlgError, random.multivariate_normal, mean, cov,
+ method='cholesky')
cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32)
with suppress_warnings() as sup:
- random.multivariate_normal(mean, cov)
+ random.multivariate_normal(mean, cov, method=method)
w = sup.record(RuntimeWarning)
assert len(w) == 0
@@ -1095,6 +1276,19 @@ class TestRandomDist(object):
assert_raises(ValueError, random.multivariate_normal,
mu, np.eye(3))
+ @pytest.mark.parametrize("method", ["svd", "eigh", "cholesky"])
+ def test_multivariate_normal_basic_stats(self, method):
+ random = Generator(MT19937(self.seed))
+ n_s = 1000
+ mean = np.array([1, 2])
+ cov = np.array([[2, 1], [1, 2]])
+ s = random.multivariate_normal(mean, cov, size=(n_s,), method=method)
+ s_center = s - mean
+ cov_emp = (s_center.T @ s_center) / (n_s - 1)
+ # these are pretty loose and are only designed to detect major errors
+ assert np.all(np.abs(s_center.mean(-2)) < 0.1)
+ assert np.all(np.abs(cov_emp - cov) < 0.2)
+
def test_negative_binomial(self):
random = Generator(MT19937(self.seed))
actual = random.negative_binomial(n=100, p=.12345, size=(3, 2))
@@ -1109,6 +1303,11 @@ class TestRandomDist(object):
assert_raises(ValueError, random.negative_binomial, 100,
[np.nan] * 10)
+ def test_negative_binomial_p0_exception(self):
+ # Verify that p=0 raises an exception.
+ with assert_raises(ValueError):
+ x = random.negative_binomial(1, 0)
+
def test_noncentral_chisquare(self):
random = Generator(MT19937(self.seed))
actual = random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
@@ -1401,7 +1600,7 @@ class TestRandomDist(object):
assert_array_equal(actual, desired)
-class TestBroadcast(object):
+class TestBroadcast:
# tests that functions that broadcast behave
# correctly when presented with non-scalar arguments
def setup(self):
@@ -1953,7 +2152,7 @@ class TestBroadcast(object):
assert_array_equal(actual, desired)
-class TestThread(object):
+class TestThread:
# make sure each state produces the same sequence even in threads
def setup(self):
self.seeds = range(4)
@@ -2000,7 +2199,7 @@ class TestThread(object):
# See Issue #4263
-class TestSingleEltArrayInput(object):
+class TestSingleEltArrayInput:
def setup(self):
self.argOne = np.array([2])
self.argTwo = np.array([3])
@@ -2054,7 +2253,7 @@ class TestSingleEltArrayInput(object):
assert_equal(out.shape, self.tgtShape)
def test_integers(self, endpoint):
- itype = [np.bool, np.int8, np.uint8, np.int16, np.uint16,
+ itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16,
np.int32, np.uint32, np.int64, np.uint64]
func = random.integers
high = np.array([1])
diff --git a/numpy/random/tests/test_generator_mt19937_regressions.py b/numpy/random/tests/test_generator_mt19937_regressions.py
index 3a937f997..456c932d4 100644
--- a/numpy/random/tests/test_generator_mt19937_regressions.py
+++ b/numpy/random/tests/test_generator_mt19937_regressions.py
@@ -1,6 +1,4 @@
-import sys
from numpy.testing import (assert_, assert_array_equal)
-from numpy.compat import long
import numpy as np
import pytest
from numpy.random import Generator, MT19937
@@ -8,7 +6,7 @@ from numpy.random import Generator, MT19937
mt19937 = Generator(MT19937())
-class TestRegression(object):
+class TestRegression:
def test_VonMises_range(self):
# Make sure generated random variables are in [-pi, pi].
@@ -42,13 +40,6 @@ class TestRegression(object):
msg = "Frequency was %f, should be < 0.23" % freq
assert_(freq < 0.23, msg)
- def test_permutation_longs(self):
- mt19937 = Generator(MT19937(1234))
- a = mt19937.permutation(12)
- mt19937 = Generator(MT19937(1234))
- b = mt19937.permutation(long(12))
- assert_array_equal(a, b)
-
def test_shuffle_mixed_dimension(self):
# Test for trac ticket #2074
for t in [[1, 2, 3, None],
@@ -56,9 +47,10 @@ class TestRegression(object):
[1, (2, 2), (3, 3), None],
[(1, 1), 2, 3, None]]:
mt19937 = Generator(MT19937(12345))
- shuffled = list(t)
+ shuffled = np.array(t, dtype=object)
mt19937.shuffle(shuffled)
- assert_array_equal(shuffled, [t[2], t[0], t[3], t[1]])
+ expected = np.array([t[2], t[0], t[3], t[1]], dtype=object)
+ assert_array_equal(np.array(shuffled, dtype=object), expected)
def test_call_within_randomstate(self):
# Check that custom BitGenerator does not call into global state
@@ -118,7 +110,7 @@ class TestRegression(object):
# a segfault on garbage collection.
# See gh-7719
mt19937 = Generator(MT19937(1234))
- a = np.array([np.arange(1), np.arange(4)])
+ a = np.array([np.arange(1), np.arange(4)], dtype=object)
for _ in range(1000):
mt19937.shuffle(a)
@@ -137,7 +129,7 @@ class TestRegression(object):
assert_array_equal(perm, np.array([2, 0, 1]))
assert_array_equal(orig, np.arange(3).view(N))
- class M(object):
+ class M:
a = np.arange(5)
def __array__(self):
diff --git a/numpy/random/tests/test_random.py b/numpy/random/tests/test_random.py
index 37bd121f3..c5f79d2c1 100644
--- a/numpy/random/tests/test_random.py
+++ b/numpy/random/tests/test_random.py
@@ -1,4 +1,3 @@
-from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
@@ -11,7 +10,7 @@ from numpy import random
import sys
-class TestSeed(object):
+class TestSeed:
def test_scalar(self):
s = np.random.RandomState(0)
assert_equal(s.randint(1000), 684)
@@ -50,7 +49,7 @@ class TestSeed(object):
[4, 5, 6]])
-class TestBinomial(object):
+class TestBinomial:
def test_n_zero(self):
# Tests the corner case of n == 0 for the binomial distribution.
# binomial(0, p) should be zero for any p in [0, 1].
@@ -65,7 +64,7 @@ class TestBinomial(object):
assert_raises(ValueError, random.binomial, 1, np.nan)
-class TestMultinomial(object):
+class TestMultinomial:
def test_basic(self):
random.multinomial(100, [0.2, 0.8])
@@ -92,8 +91,14 @@ class TestMultinomial(object):
assert_raises(TypeError, np.random.multinomial, 1, p,
float(1))
+ def test_multidimensional_pvals(self):
+ assert_raises(ValueError, np.random.multinomial, 10, [[0, 1]])
+ assert_raises(ValueError, np.random.multinomial, 10, [[0], [1]])
+ assert_raises(ValueError, np.random.multinomial, 10, [[[0], [1]], [[1], [0]]])
+ assert_raises(ValueError, np.random.multinomial, 10, np.array([[0, 1], [1, 0]]))
-class TestSetState(object):
+
+class TestSetState:
def setup(self):
self.seed = 1234567890
self.prng = random.RandomState(self.seed)
@@ -141,7 +146,7 @@ class TestSetState(object):
self.prng.negative_binomial(0.5, 0.5)
-class TestRandint(object):
+class TestRandint:
rfunc = np.random.randint
@@ -269,7 +274,7 @@ class TestRandint(object):
sample = self.rfunc(lbnd, ubnd, dtype=dt)
assert_equal(sample.dtype, np.dtype(dt))
- for dt in (bool, int, np.long):
+ for dt in (bool, int, np.compat.long):
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
@@ -279,7 +284,7 @@ class TestRandint(object):
assert_equal(type(sample), dt)
-class TestRandomDist(object):
+class TestRandomDist:
# Make sure the random distribution returns the correct value for a
# given seed
@@ -974,7 +979,7 @@ class TestRandomDist(object):
assert_array_equal(actual, desired)
-class TestBroadcast(object):
+class TestBroadcast:
# tests that functions that broadcast behave
# correctly when presented with non-scalar arguments
def setup(self):
@@ -1544,7 +1549,7 @@ class TestBroadcast(object):
assert_raises(ValueError, logseries, bad_p_two * 3)
-class TestThread(object):
+class TestThread:
# make sure each state produces the same sequence even in threads
def setup(self):
self.seeds = range(4)
@@ -1588,7 +1593,7 @@ class TestThread(object):
# See Issue #4263
-class TestSingleEltArrayInput(object):
+class TestSingleEltArrayInput:
def setup(self):
self.argOne = np.array([2])
self.argTwo = np.array([3])
diff --git a/numpy/random/tests/test_randomstate.py b/numpy/random/tests/test_randomstate.py
index a0edc5c23..ebe8558ba 100644
--- a/numpy/random/tests/test_randomstate.py
+++ b/numpy/random/tests/test_randomstate.py
@@ -11,7 +11,8 @@ from numpy.testing import (
suppress_warnings
)
-from numpy.random import MT19937, PCG64, mtrand as random
+from numpy.random import MT19937, PCG64
+from numpy import random
INT_FUNCS = {'binomial': (100.0, 0.6),
'geometric': (.5,),
@@ -60,7 +61,7 @@ def assert_mt19937_state_equal(a, b):
assert_equal(a['gauss'], b['gauss'])
-class TestSeed(object):
+class TestSeed:
def test_scalar(self):
s = random.RandomState(0)
assert_equal(s.randint(1000), 684)
@@ -107,7 +108,7 @@ class TestSeed(object):
assert_raises(ValueError, random.RandomState, MT19937)
-class TestBinomial(object):
+class TestBinomial:
def test_n_zero(self):
# Tests the corner case of n == 0 for the binomial distribution.
# binomial(0, p) should be zero for any p in [0, 1].
@@ -122,7 +123,7 @@ class TestBinomial(object):
assert_raises(ValueError, random.binomial, 1, np.nan)
-class TestMultinomial(object):
+class TestMultinomial:
def test_basic(self):
random.multinomial(100, [0.2, 0.8])
@@ -167,7 +168,7 @@ class TestMultinomial(object):
assert_array_equal(non_contig, contig)
-class TestSetState(object):
+class TestSetState:
def setup(self):
self.seed = 1234567890
self.random_state = random.RandomState(self.seed)
@@ -228,7 +229,7 @@ class TestSetState(object):
new_state = ('Unknown', ) + state[1:]
assert_raises(ValueError, self.random_state.set_state, new_state)
assert_raises(TypeError, self.random_state.set_state,
- np.array(new_state, dtype=np.object))
+ np.array(new_state, dtype=object))
state = self.random_state.get_state(legacy=False)
del state['bit_generator']
assert_raises(ValueError, self.random_state.set_state, state)
@@ -254,7 +255,7 @@ class TestSetState(object):
assert repr(self.random_state).startswith('RandomState(MT19937)')
-class TestRandint(object):
+class TestRandint:
rfunc = random.randint
@@ -381,7 +382,7 @@ class TestRandint(object):
sample = self.rfunc(lbnd, ubnd, dtype=dt)
assert_equal(sample.dtype, np.dtype(dt))
- for dt in (bool, int, np.long):
+ for dt in (bool, int, np.compat.long):
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
@@ -391,7 +392,7 @@ class TestRandint(object):
assert_equal(type(sample), dt)
-class TestRandomDist(object):
+class TestRandomDist:
# Make sure the random distribution returns the correct value for a
# given seed
@@ -454,7 +455,7 @@ class TestRandomDist(object):
random.seed(self.seed)
rs = random.RandomState(self.seed)
actual = rs.tomaxint(size=(3, 2))
- if np.iinfo(np.int).max == 2147483647:
+ if np.iinfo(int).max == 2147483647:
desired = np.array([[1328851649, 731237375],
[1270502067, 320041495],
[1908433478, 499156889]], dtype=np.int64)
@@ -1244,7 +1245,7 @@ class TestRandomDist(object):
assert_array_equal(actual, desired)
-class TestBroadcast(object):
+class TestBroadcast:
# tests that functions that broadcast behave
# correctly when presented with non-scalar arguments
def setup(self):
@@ -1831,7 +1832,7 @@ class TestBroadcast(object):
assert_raises(ValueError, logseries, bad_p_two * 3)
-class TestThread(object):
+class TestThread:
# make sure each state produces the same sequence even in threads
def setup(self):
self.seeds = range(4)
@@ -1878,7 +1879,7 @@ class TestThread(object):
# See Issue #4263
-class TestSingleEltArrayInput(object):
+class TestSingleEltArrayInput:
def setup(self):
self.argOne = np.array([2])
self.argTwo = np.array([3])
diff --git a/numpy/random/tests/test_randomstate_regression.py b/numpy/random/tests/test_randomstate_regression.py
index edf32ea97..4eb82fc4c 100644
--- a/numpy/random/tests/test_randomstate_regression.py
+++ b/numpy/random/tests/test_randomstate_regression.py
@@ -5,13 +5,12 @@ import pytest
from numpy.testing import (
assert_, assert_array_equal, assert_raises,
)
-from numpy.compat import long
import numpy as np
-from numpy.random import mtrand as random
+from numpy import random
-class TestRegression(object):
+class TestRegression:
def test_VonMises_range(self):
# Make sure generated random variables are in [-pi, pi].
@@ -52,13 +51,6 @@ class TestRegression(object):
msg = "Frequency was %f, should be < 0.23" % freq
assert_(freq < 0.23, msg)
- def test_permutation_longs(self):
- random.seed(1234)
- a = random.permutation(12)
- random.seed(1234)
- b = random.permutation(long(12))
- assert_array_equal(a, b)
-
def test_shuffle_mixed_dimension(self):
# Test for trac ticket #2074
for t in [[1, 2, 3, None],
@@ -68,7 +60,8 @@ class TestRegression(object):
random.seed(12345)
shuffled = list(t)
random.shuffle(shuffled)
- assert_array_equal(shuffled, [t[0], t[3], t[1], t[2]])
+ expected = np.array([t[0], t[3], t[1], t[2]], dtype=object)
+ assert_array_equal(np.array(shuffled, dtype=object), expected)
def test_call_within_randomstate(self):
# Check that custom RandomState does not call into global state
@@ -128,7 +121,7 @@ class TestRegression(object):
# a segfault on garbage collection.
# See gh-7719
random.seed(1234)
- a = np.array([np.arange(1), np.arange(4)])
+ a = np.array([np.arange(1), np.arange(4)], dtype=object)
for _ in range(1000):
random.shuffle(a)
@@ -147,7 +140,7 @@ class TestRegression(object):
assert_array_equal(perm, np.array([0, 2, 1]))
assert_array_equal(orig, np.arange(3).view(N))
- class M(object):
+ class M:
a = np.arange(5)
def __array__(self):
diff --git a/numpy/random/tests/test_regression.py b/numpy/random/tests/test_regression.py
index 509e2d57f..278622287 100644
--- a/numpy/random/tests/test_regression.py
+++ b/numpy/random/tests/test_regression.py
@@ -1,15 +1,12 @@
-from __future__ import division, absolute_import, print_function
-
import sys
from numpy.testing import (
assert_, assert_array_equal, assert_raises,
)
from numpy import random
-from numpy.compat import long
import numpy as np
-class TestRegression(object):
+class TestRegression:
def test_VonMises_range(self):
# Make sure generated random variables are in [-pi, pi].
@@ -50,13 +47,6 @@ class TestRegression(object):
msg = "Frequency was %f, should be < 0.23" % freq
assert_(freq < 0.23, msg)
- def test_permutation_longs(self):
- np.random.seed(1234)
- a = np.random.permutation(12)
- np.random.seed(1234)
- b = np.random.permutation(long(12))
- assert_array_equal(a, b)
-
def test_shuffle_mixed_dimension(self):
# Test for trac ticket #2074
for t in [[1, 2, 3, None],
@@ -66,7 +56,8 @@ class TestRegression(object):
np.random.seed(12345)
shuffled = list(t)
random.shuffle(shuffled)
- assert_array_equal(shuffled, [t[0], t[3], t[1], t[2]])
+ expected = np.array([t[0], t[3], t[1], t[2]], dtype=object)
+ assert_array_equal(np.array(shuffled, dtype=object), expected)
def test_call_within_randomstate(self):
# Check that custom RandomState does not call into global state
@@ -126,7 +117,7 @@ class TestRegression(object):
# a segfault on garbage collection.
# See gh-7719
np.random.seed(1234)
- a = np.array([np.arange(1), np.arange(4)])
+ a = np.array([np.arange(1), np.arange(4)], dtype=object)
for _ in range(1000):
np.random.shuffle(a)
@@ -145,7 +136,7 @@ class TestRegression(object):
assert_array_equal(perm, np.array([0, 2, 1]))
assert_array_equal(orig, np.arange(3).view(N))
- class M(object):
+ class M:
a = np.arange(5)
def __array__(self):
diff --git a/numpy/random/tests/test_seed_sequence.py b/numpy/random/tests/test_seed_sequence.py
index 8d6d604a2..fe23680ed 100644
--- a/numpy/random/tests/test_seed_sequence.py
+++ b/numpy/random/tests/test_seed_sequence.py
@@ -1,7 +1,7 @@
import numpy as np
from numpy.testing import assert_array_equal
-from numpy.random.bit_generator import SeedSequence
+from numpy.random import SeedSequence
def test_reference_data():
diff --git a/numpy/random/tests/test_smoke.py b/numpy/random/tests/test_smoke.py
index 6e641b5f4..ebfc6825e 100644
--- a/numpy/random/tests/test_smoke.py
+++ b/numpy/random/tests/test_smoke.py
@@ -1,5 +1,4 @@
import pickle
-import time
from functools import partial
import numpy as np
@@ -8,7 +7,7 @@ from numpy.testing import assert_equal, assert_, assert_array_equal
from numpy.random import (Generator, MT19937, PCG64, Philox, SFC64)
@pytest.fixture(scope='module',
- params=(np.bool, np.int8, np.int16, np.int32, np.int64,
+ params=(np.bool_, np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64))
def dtype(request):
return request.param
@@ -92,7 +91,7 @@ def warmup(rg, n=None):
rg.random(n, dtype=np.float32)
-class RNG(object):
+class RNG:
@classmethod
def setup_class(cls):
# Overridden in test classes. Place holder to silence IDE noise
@@ -655,7 +654,7 @@ class RNG(object):
rg.standard_gamma(1.0, out=existing[::3])
def test_integers_broadcast(self, dtype):
- if dtype == np.bool:
+ if dtype == np.bool_:
upper = 2
lower = 0
else:
@@ -672,7 +671,7 @@ class RNG(object):
assert_equal(a, c)
self._reset_state()
d = self.rg.integers(np.array(
- [lower] * 10), np.array([upper], dtype=np.object), size=10,
+ [lower] * 10), np.array([upper], dtype=object), size=10,
dtype=dtype)
assert_equal(a, d)
self._reset_state()
@@ -701,7 +700,7 @@ class RNG(object):
assert out.shape == (1,)
def test_integers_broadcast_errors(self, dtype):
- if dtype == np.bool:
+ if dtype == np.bool_:
upper = 2
lower = 0
else:
diff --git a/numpy/setup.py b/numpy/setup.py
index 4ccdaeea5..fb9b36b78 100644
--- a/numpy/setup.py
+++ b/numpy/setup.py
@@ -1,6 +1,4 @@
-#!/usr/bin/env python
-from __future__ import division, print_function
-
+#!/usr/bin/env python3
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
diff --git a/numpy/testing/__init__.py b/numpy/testing/__init__.py
index a8bd4fc15..e1f87621f 100644
--- a/numpy/testing/__init__.py
+++ b/numpy/testing/__init__.py
@@ -5,8 +5,6 @@ in a single location, so that test scripts can just import it and work right
away.
"""
-from __future__ import division, absolute_import, print_function
-
from unittest import TestCase
from ._private.utils import *
diff --git a/numpy/testing/_private/decorators.py b/numpy/testing/_private/decorators.py
index 24c4e385d..b4b6259a0 100644
--- a/numpy/testing/_private/decorators.py
+++ b/numpy/testing/_private/decorators.py
@@ -13,14 +13,7 @@ function name, setup and teardown functions and so on - see
``nose.tools`` for more information.
"""
-from __future__ import division, absolute_import, print_function
-
-try:
- # Accessing collections abstract classes from collections
- # has been deprecated since Python 3.3
- import collections.abc as collections_abc
-except ImportError:
- import collections as collections_abc
+import collections.abc
from .utils import SkipTest, assert_warns, HAS_REFCOUNT
@@ -131,7 +124,7 @@ def skipif(skip_condition, msg=None):
import nose
# Allow for both boolean or callable skip conditions.
- if isinstance(skip_condition, collections_abc.Callable):
+ if isinstance(skip_condition, collections.abc.Callable):
skip_val = lambda: skip_condition()
else:
skip_val = lambda: skip_condition
@@ -159,8 +152,7 @@ def skipif(skip_condition, msg=None):
if skip_val():
raise SkipTest(get_msg(f, msg))
else:
- for x in f(*args, **kwargs):
- yield x
+ yield from f(*args, **kwargs)
# Choose the right skipper to use when building the actual decorator.
if nose.util.isgenerator(f):
@@ -207,7 +199,7 @@ def knownfailureif(fail_condition, msg=None):
msg = 'Test skipped due to known failure'
# Allow for both boolean or callable known failure conditions.
- if isinstance(fail_condition, collections_abc.Callable):
+ if isinstance(fail_condition, collections.abc.Callable):
fail_val = lambda: fail_condition()
else:
fail_val = lambda: fail_condition
@@ -262,7 +254,7 @@ def deprecated(conditional=True):
with assert_warns(DeprecationWarning):
f(*args, **kwargs)
- if isinstance(conditional, collections_abc.Callable):
+ if isinstance(conditional, collections.abc.Callable):
cond = conditional()
else:
cond = conditional
diff --git a/numpy/testing/_private/noseclasses.py b/numpy/testing/_private/noseclasses.py
index e99bbc97d..493bacfdd 100644
--- a/numpy/testing/_private/noseclasses.py
+++ b/numpy/testing/_private/noseclasses.py
@@ -4,8 +4,6 @@
# Because this module imports nose directly, it should not
# be used except by nosetester.py to avoid a general NumPy
# dependency on nose.
-from __future__ import division, absolute_import, print_function
-
import os
import sys
import doctest
@@ -268,7 +266,7 @@ class NumpyDoctest(npd.Doctest):
return npd.Doctest.wantFile(self, file)
-class Unplugger(object):
+class Unplugger:
""" Nose plugin to remove named plugin late in loading
By default it removes the "doctest" plugin.
diff --git a/numpy/testing/_private/nosetester.py b/numpy/testing/_private/nosetester.py
index 19569a509..bd6d002aa 100644
--- a/numpy/testing/_private/nosetester.py
+++ b/numpy/testing/_private/nosetester.py
@@ -4,12 +4,9 @@ Nose test running.
This module implements ``test()`` and ``bench()`` functions for NumPy modules.
"""
-from __future__ import division, absolute_import, print_function
-
import os
import sys
import warnings
-from numpy.compat import basestring
import numpy as np
from .utils import import_nose, suppress_warnings
@@ -112,7 +109,7 @@ def run_module_suite(file_to_run=None, argv=None):
nose.run(argv=argv, addplugins=[KnownFailurePlugin()])
-class NoseTester(object):
+class NoseTester:
"""
Nose test runner.
@@ -214,7 +211,7 @@ class NoseTester(object):
'''
argv = [__file__, self.package_path, '-s']
if label and label != 'full':
- if not isinstance(label, basestring):
+ if not isinstance(label, str):
raise TypeError('Selection label should be a string')
if label == 'fast':
label = 'not slow'
@@ -421,7 +418,7 @@ class NoseTester(object):
_warn_opts = dict(develop=(Warning,),
release=())
- if isinstance(raise_warnings, basestring):
+ if isinstance(raise_warnings, str):
raise_warnings = _warn_opts[raise_warnings]
with suppress_warnings("location") as sup:
@@ -450,20 +447,6 @@ class NoseTester(object):
warnings.simplefilter("always")
from ...distutils import cpuinfo
sup.filter(category=UserWarning, module=cpuinfo)
- # See #7949: Filter out deprecation warnings due to the -3 flag to
- # python 2
- if sys.version_info.major == 2 and sys.py3kwarning:
- # This is very specific, so using the fragile module filter
- # is fine
- import threading
- sup.filter(DeprecationWarning,
- r"sys\.exc_clear\(\) not supported in 3\.x",
- module=threading)
- sup.filter(DeprecationWarning, message=r"in 3\.x, __setslice__")
- sup.filter(DeprecationWarning, message=r"in 3\.x, __getslice__")
- sup.filter(DeprecationWarning, message=r"buffer\(\) not supported in 3\.x")
- sup.filter(DeprecationWarning, message=r"CObject type is not supported in 3\.x")
- sup.filter(DeprecationWarning, message=r"comparing unequal types not supported in 3\.x")
# Filter out some deprecation warnings inside nose 1.3.7 when run
# on python 3.5b2. See
# https://github.com/nose-devs/nose/issues/929
diff --git a/numpy/testing/_private/parameterized.py b/numpy/testing/_private/parameterized.py
index 489d8e09a..3bd8ede91 100644
--- a/numpy/testing/_private/parameterized.py
+++ b/numpy/testing/_private/parameterized.py
@@ -31,11 +31,10 @@ either expressed or implied, of David Wolever.
"""
import re
-import sys
import inspect
import warnings
from functools import wraps
-from types import MethodType as MethodType
+from types import MethodType
from collections import namedtuple
try:
@@ -45,30 +44,6 @@ except ImportError:
from unittest import TestCase
-PY2 = sys.version_info[0] == 2
-
-
-if PY2:
- from types import InstanceType
- lzip = zip
- text_type = unicode
- bytes_type = str
- string_types = basestring,
- def make_method(func, instance, type):
- return MethodType(func, instance, type)
-else:
- # Python 3 doesn't have an InstanceType, so just use a dummy type.
- class InstanceType():
- pass
- lzip = lambda *a: list(zip(*a))
- text_type = str
- string_types = str,
- bytes_type = bytes
- def make_method(func, instance, type):
- if instance is None:
- return func
- return MethodType(func, instance)
-
_param = namedtuple("param", "args kwargs")
class param(_param):
@@ -122,7 +97,7 @@ class param(_param):
"""
if isinstance(args, param):
return args
- elif isinstance(args, string_types):
+ elif isinstance(args, (str,)):
args = (args, )
try:
return cls(*args)
@@ -179,7 +154,7 @@ def parameterized_argument_value_pairs(func, p):
named_args = argspec.args[arg_offset:]
- result = lzip(named_args, p.args)
+ result = list(zip(named_args, p.args))
named_args = argspec.args[len(result) + arg_offset:]
varargs = p.args[len(result):]
@@ -214,11 +189,11 @@ def short_repr(x, n=64):
"""
x_repr = repr(x)
- if isinstance(x_repr, bytes_type):
+ if isinstance(x_repr, bytes):
try:
- x_repr = text_type(x_repr, "utf-8")
+ x_repr = str(x_repr, "utf-8")
except UnicodeDecodeError:
- x_repr = text_type(x_repr, "latin1")
+ x_repr = str(x_repr, "latin1")
if len(x_repr) > n:
x_repr = x_repr[:n//2] + "..." + x_repr[len(x_repr) - n//2:]
return x_repr
@@ -246,7 +221,7 @@ def default_doc_func(func, num, p):
def default_name_func(func, num, p):
base_name = func.__name__
name_suffix = "_%s" %(num, )
- if len(p.args) > 0 and isinstance(p.args[0], string_types):
+ if len(p.args) > 0 and isinstance(p.args[0], (str,)):
name_suffix += "_" + parameterized.to_safe_name(p.args[0])
return base_name + name_suffix
@@ -271,7 +246,7 @@ def set_test_runner(name):
def detect_runner():
""" Guess which test runner we're using by traversing the stack and looking
for the first matching module. This *should* be reasonably safe, as
- it's done during test disocvery where the test runner should be the
+ it's done during test discovery where the test runner should be the
stack frame immediately outside. """
if _test_runner_override is not None:
return _test_runner_override
@@ -286,17 +261,14 @@ def detect_runner():
if module in _test_runners:
_test_runner_guess = module
break
- if record[1].endswith("python2.6/unittest.py"):
- _test_runner_guess = "unittest"
- break
else:
_test_runner_guess = None
return _test_runner_guess
-class parameterized(object):
+class parameterized:
""" Parameterize a test case::
- class TestInt(object):
+ class TestInt:
@parameterized([
("A", 10),
("F", 15),
@@ -324,15 +296,6 @@ class parameterized(object):
@wraps(test_func)
def wrapper(test_self=None):
test_cls = test_self and type(test_self)
- if test_self is not None:
- if issubclass(test_cls, InstanceType):
- raise TypeError((
- "@parameterized can't be used with old-style classes, but "
- "%r has an old-style class. Consider using a new-style "
- "class, or '@parameterized.expand' "
- "(see http://stackoverflow.com/q/54867/71522 for more "
- "information on old-style classes)."
- ) %(test_self, ))
original_doc = wrapper.__doc__
for num, args in enumerate(wrapper.parameterized_input):
@@ -365,15 +328,7 @@ class parameterized(object):
# Python 3 doesn't let us pull the function out of a bound method.
unbound_func = nose_func
if test_self is not None:
- # Under nose on Py2 we need to return an unbound method to make
- # sure that the `self` in the method is properly shared with the
- # `self` used in `setUp` and `tearDown`. But only there. Everyone
- # else needs a bound method.
- func_self = (
- None if PY2 and detect_runner() == "nose" else
- test_self
- )
- nose_func = make_method(nose_func, func_self, type(test_self))
+ nose_func = MethodType(nose_func, test_self)
return unbound_func, (nose_func, ) + p.args + (p.kwargs or {}, )
def assert_not_in_testcase_subclass(self):
diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py
index 8a31fcf15..4569efa91 100644
--- a/numpy/testing/_private/utils.py
+++ b/numpy/testing/_private/utils.py
@@ -2,8 +2,6 @@
Utility function to facilitate testing.
"""
-from __future__ import division, absolute_import, print_function
-
import os
import sys
import platform
@@ -21,11 +19,9 @@ import pprint
from numpy.core import(
intp, float32, empty, arange, array_repr, ndarray, isnat, array)
+import numpy.linalg.lapack_lite
-if sys.version_info[0] >= 3:
- from io import StringIO
-else:
- from StringIO import StringIO
+from io import StringIO
__all__ = [
'assert_equal', 'assert_almost_equal', 'assert_approx_equal',
@@ -39,7 +35,7 @@ __all__ = [
'SkipTest', 'KnownFailureException', 'temppath', 'tempdir', 'IS_PYPY',
'HAS_REFCOUNT', 'suppress_warnings', 'assert_array_compare',
'_assert_valid_refcount', '_gen_alignment_data', 'assert_no_gc_cycles',
- 'break_cycles',
+ 'break_cycles', 'HAS_LAPACK64'
]
@@ -53,6 +49,7 @@ verbose = 0
IS_PYPY = platform.python_implementation() == 'PyPy'
HAS_REFCOUNT = getattr(sys, 'getrefcount', None) is not None
+HAS_LAPACK64 = numpy.linalg.lapack_lite._ilp64
def import_nose():
@@ -195,9 +192,8 @@ elif sys.platform[:5] == 'linux':
"""
try:
- f = open(_proc_pid_stat, 'r')
- l = f.readline().split(' ')
- f.close()
+ with open(_proc_pid_stat, 'r') as f:
+ l = f.readline().split(' ')
return int(l[22])
except Exception:
return
@@ -224,9 +220,8 @@ if sys.platform[:5] == 'linux':
if not _load_time:
_load_time.append(time.time())
try:
- f = open(_proc_pid_stat, 'r')
- l = f.readline().split(' ')
- f.close()
+ with open(_proc_pid_stat, 'r') as f:
+ l = f.readline().split(' ')
return int(l[13])
except Exception:
return int(100*(time.time()-_load_time[0]))
@@ -284,8 +279,12 @@ def assert_equal(actual, desired, err_msg='', verbose=True):
check that all elements of these objects are equal. An exception is raised
at the first conflicting values.
+ When one of `actual` and `desired` is a scalar and the other is array_like,
+ the function checks that each element of the array_like object is equal to
+ the scalar.
+
This function handles NaN comparisons as if NaN was a "normal" number.
- That is, no assertion is raised if both objects have NaNs in the same
+ That is, AssertionError is not raised if both objects have NaNs in the same
positions. This is in contrast to the IEEE standard on NaNs, which says
that NaN compared to anything must return False.
@@ -374,21 +373,6 @@ def assert_equal(actual, desired, err_msg='', verbose=True):
if isscalar(desired) != isscalar(actual):
raise AssertionError(msg)
- # Inf/nan/negative zero handling
- try:
- isdesnan = gisnan(desired)
- isactnan = gisnan(actual)
- if isdesnan and isactnan:
- return # both nan, so equal
-
- # handle signed zero specially for floats
- if desired == 0 and actual == 0:
- if not signbit(desired) == signbit(actual):
- raise AssertionError(msg)
-
- except (TypeError, ValueError, NotImplementedError):
- pass
-
try:
isdesnat = isnat(desired)
isactnat = isnat(actual)
@@ -404,6 +388,33 @@ def assert_equal(actual, desired, err_msg='', verbose=True):
except (TypeError, ValueError, NotImplementedError):
pass
+ # Inf/nan/negative zero handling
+ try:
+ isdesnan = gisnan(desired)
+ isactnan = gisnan(actual)
+ if isdesnan and isactnan:
+ return # both nan, so equal
+
+ # handle signed zero specially for floats
+ array_actual = array(actual)
+ array_desired = array(desired)
+ if (array_actual.dtype.char in 'Mm' or
+ array_desired.dtype.char in 'Mm'):
+ # version 1.18
+ # until this version, gisnan failed for datetime64 and timedelta64.
+ # Now it succeeds but comparison to scalar with a different type
+ # emits a DeprecationWarning.
+ # Avoid that by skipping the next check
+ raise NotImplementedError('cannot compare to a scalar '
+ 'with a different type')
+
+ if desired == 0 and actual == 0:
+ if not signbit(desired) == signbit(actual):
+ raise AssertionError(msg)
+
+ except (TypeError, ValueError, NotImplementedError):
+ pass
+
try:
# Explicitly use __eq__ for comparison, gh-2552
if not (desired == actual):
@@ -519,7 +530,8 @@ def assert_almost_equal(actual,desired,decimal=7,err_msg='',verbose=True):
...
AssertionError:
Arrays are not almost equal to 9 decimals
- Mismatch: 50%
+ <BLANKLINE>
+ Mismatched elements: 1 / 2 (50%)
Max absolute difference: 6.66669964e-09
Max relative difference: 2.85715698e-09
x: array([1. , 2.333333333])
@@ -841,10 +853,11 @@ def assert_array_equal(x, y, err_msg='', verbose=True):
Raises an AssertionError if two array_like objects are not equal.
Given two array_like objects, check that the shape is equal and all
- elements of these objects are equal. An exception is raised at
- shape mismatch or conflicting values. In contrast to the standard usage
- in numpy, NaNs are compared like numbers, no assertion is raised if
- both objects have NaNs in the same positions.
+ elements of these objects are equal (but see the Notes for the special
+ handling of a scalar). An exception is raised at shape mismatch or
+ conflicting values. In contrast to the standard usage in numpy, NaNs
+ are compared like numbers, no assertion is raised if both objects have
+ NaNs in the same positions.
The usual caution for verifying equality with floating point numbers is
advised.
@@ -871,6 +884,12 @@ def assert_array_equal(x, y, err_msg='', verbose=True):
relative and/or absolute precision.
assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
+ Notes
+ -----
+ When one of `x` and `y` is a scalar and the other is array_like, the
+ function checks that each element of the array_like object is equal to
+ the scalar.
+
Examples
--------
The first assert does not raise an exception:
@@ -878,7 +897,7 @@ def assert_array_equal(x, y, err_msg='', verbose=True):
>>> np.testing.assert_array_equal([1.0,2.33333,np.nan],
... [np.exp(0),2.33333, np.nan])
- Assert fails with numerical inprecision with floats:
+ Assert fails with numerical imprecision with floats:
>>> np.testing.assert_array_equal([1.0,np.pi,np.nan],
... [1, np.sqrt(np.pi)**2, np.nan])
@@ -886,7 +905,8 @@ def assert_array_equal(x, y, err_msg='', verbose=True):
...
AssertionError:
Arrays are not equal
- Mismatch: 33.3%
+ <BLANKLINE>
+ Mismatched elements: 1 / 3 (33.3%)
Max absolute difference: 4.4408921e-16
Max relative difference: 1.41357986e-16
x: array([1. , 3.141593, nan])
@@ -899,6 +919,12 @@ def assert_array_equal(x, y, err_msg='', verbose=True):
... [1, np.sqrt(np.pi)**2, np.nan],
... rtol=1e-10, atol=0)
+ As mentioned in the Notes section, `assert_array_equal` has special
+ handling for scalars. Here the test checks that each value in `x` is 3:
+
+ >>> x = np.full((2, 5), fill_value=3)
+ >>> np.testing.assert_array_equal(x, 3)
+
"""
__tracebackhide__ = True # Hide traceback for py.test
assert_array_compare(operator.__eq__, x, y, err_msg=err_msg,
@@ -963,7 +989,8 @@ def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True):
...
AssertionError:
Arrays are not almost equal to 5 decimals
- Mismatch: 33.3%
+ <BLANKLINE>
+ Mismatched elements: 1 / 3 (33.3%)
Max absolute difference: 6.e-05
Max relative difference: 2.57136612e-05
x: array([1. , 2.33333, nan])
@@ -975,6 +1002,7 @@ def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True):
...
AssertionError:
Arrays are not almost equal to 5 decimals
+ <BLANKLINE>
x and y nan location mismatch:
x: array([1. , 2.33333, nan])
y: array([1. , 2.33333, 5. ])
@@ -1062,7 +1090,8 @@ def assert_array_less(x, y, err_msg='', verbose=True):
...
AssertionError:
Arrays are not less-ordered
- Mismatch: 33.3%
+ <BLANKLINE>
+ Mismatched elements: 1 / 3 (33.3%)
Max absolute difference: 1.
Max relative difference: 0.5
x: array([ 1., 1., nan])
@@ -1073,7 +1102,8 @@ def assert_array_less(x, y, err_msg='', verbose=True):
...
AssertionError:
Arrays are not less-ordered
- Mismatch: 50%
+ <BLANKLINE>
+ Mismatched elements: 1 / 2 (50%)
Max absolute difference: 2.
Max relative difference: 0.66666667
x: array([1., 4.])
@@ -1084,6 +1114,7 @@ def assert_array_less(x, y, err_msg='', verbose=True):
...
AssertionError:
Arrays are not less-ordered
+ <BLANKLINE>
(shapes (3,), (1,) mismatch)
x: array([1., 2., 3.])
y: array([4])
@@ -1315,14 +1346,7 @@ def assert_raises_regex(exception_class, expected_regexp, *args, **kwargs):
"""
__tracebackhide__ = True # Hide traceback for py.test
-
- if sys.version_info.major >= 3:
- funcname = _d.assertRaisesRegex
- else:
- # Only present in Python 2.7, missing from unittest in 2.6
- funcname = _d.assertRaisesRegexp
-
- return funcname(exception_class, expected_regexp, *args, **kwargs)
+ return _d.assertRaisesRegex(exception_class, expected_regexp, *args, **kwargs)
def decorate_methods(cls, decorator, testmatch=None):
@@ -1427,7 +1451,9 @@ def _assert_valid_refcount(op):
"""
if not HAS_REFCOUNT:
return True
- import numpy as np, gc
+
+ import gc
+ import numpy as np
b = np.arange(100*100).reshape(100, 100)
c = b
@@ -1590,6 +1616,12 @@ def assert_array_max_ulp(a, b, maxulp=1, dtype=None):
AssertionError
If one or more elements differ by more than `maxulp`.
+ Notes
+ -----
+ For computing the ULP difference, this API does not differentiate between
+ various representations of NAN (ULP difference between 0x7fc00000 and 0xffc00000
+ is zero).
+
See Also
--------
assert_array_almost_equal_nulp : Compare two arrays relatively to their
@@ -1605,8 +1637,9 @@ def assert_array_max_ulp(a, b, maxulp=1, dtype=None):
import numpy as np
ret = nulp_diff(a, b, dtype)
if not np.all(ret <= maxulp):
- raise AssertionError("Arrays are not almost equal up to %g ULP" %
- maxulp)
+ raise AssertionError("Arrays are not almost equal up to %g "
+ "ULP (max difference is %g ULP)" %
+ (maxulp, np.max(ret)))
return ret
@@ -1629,6 +1662,12 @@ def nulp_diff(x, y, dtype=None):
number of representable floating point numbers between each item in x
and y.
+ Notes
+ -----
+ For computing the ULP difference, this API does not differentiate between
+ various representations of NAN (ULP difference between 0x7fc00000 and 0xffc00000
+ is zero).
+
Examples
--------
# By definition, epsilon is the smallest number such as 1 + eps != 1, so
@@ -1648,8 +1687,11 @@ def nulp_diff(x, y, dtype=None):
if np.iscomplexobj(x) or np.iscomplexobj(y):
raise NotImplementedError("_nulp not implemented for complex array")
- x = np.array(x, dtype=t)
- y = np.array(y, dtype=t)
+ x = np.array([x], dtype=t)
+ y = np.array([y], dtype=t)
+
+ x[np.isnan(x)] = np.nan
+ y[np.isnan(y)] = np.nan
if not x.shape == y.shape:
raise ValueError("x and y do not have the same shape: %s - %s" %
@@ -1971,7 +2013,7 @@ class clear_and_catch_warnings(warnings.catch_warnings):
mod.__warningregistry__.update(self._warnreg_copies[mod])
-class suppress_warnings(object):
+class suppress_warnings:
"""
Context manager and decorator doing much the same as
``warnings.catch_warnings``.
@@ -2186,8 +2228,7 @@ class suppress_warnings(object):
del self._filters
def _showwarning(self, message, category, filename, lineno,
- *args, **kwargs):
- use_warnmsg = kwargs.pop("use_warnmsg", None)
+ *args, use_warnmsg=None, **kwargs):
for cat, _, pattern, mod, rec in (
self._suppressions + self._tmp_suppressions)[::-1]:
if (issubclass(category, cat) and
@@ -2351,3 +2392,118 @@ def break_cycles():
gc.collect()
# one more, just to make sure
gc.collect()
+
+
+def requires_memory(free_bytes):
+ """Decorator to skip a test if not enough memory is available"""
+ import pytest
+
+ def decorator(func):
+ @wraps(func)
+ def wrapper(*a, **kw):
+ msg = check_free_memory(free_bytes)
+ if msg is not None:
+ pytest.skip(msg)
+
+ try:
+ return func(*a, **kw)
+ except MemoryError:
+ # Probably ran out of memory regardless: don't regard as failure
+ pytest.xfail("MemoryError raised")
+
+ return wrapper
+
+ return decorator
+
+
+def check_free_memory(free_bytes):
+ """
+ Check whether `free_bytes` amount of memory is currently free.
+ Returns: None if enough memory available, otherwise error message
+ """
+ env_var = 'NPY_AVAILABLE_MEM'
+ env_value = os.environ.get(env_var)
+ if env_value is not None:
+ try:
+ mem_free = _parse_size(env_value)
+ except ValueError as exc:
+ raise ValueError('Invalid environment variable {}: {!s}'.format(
+ env_var, exc))
+
+ msg = ('{0} GB memory required, but environment variable '
+ 'NPY_AVAILABLE_MEM={1} set'.format(
+ free_bytes/1e9, env_value))
+ else:
+ mem_free = _get_mem_available()
+
+ if mem_free is None:
+ msg = ("Could not determine available memory; set NPY_AVAILABLE_MEM "
+ "environment variable (e.g. NPY_AVAILABLE_MEM=16GB) to run "
+ "the test.")
+ mem_free = -1
+ else:
+ msg = '{0} GB memory required, but {1} GB available'.format(
+ free_bytes/1e9, mem_free/1e9)
+
+ return msg if mem_free < free_bytes else None
+
+
+def _parse_size(size_str):
+ """Convert memory size strings ('12 GB' etc.) to float"""
+ suffixes = {'': 1, 'b': 1,
+ 'k': 1000, 'm': 1000**2, 'g': 1000**3, 't': 1000**4,
+ 'kb': 1000, 'mb': 1000**2, 'gb': 1000**3, 'tb': 1000**4,
+ 'kib': 1024, 'mib': 1024**2, 'gib': 1024**3, 'tib': 1024**4}
+
+ size_re = re.compile(r'^\s*(\d+|\d+\.\d+)\s*({0})\s*$'.format(
+ '|'.join(suffixes.keys())), re.I)
+
+ m = size_re.match(size_str.lower())
+ if not m or m.group(2) not in suffixes:
+ raise ValueError("value {!r} not a valid size".format(size_str))
+ return int(float(m.group(1)) * suffixes[m.group(2)])
+
+
+def _get_mem_available():
+ """Return available memory in bytes, or None if unknown."""
+ try:
+ import psutil
+ return psutil.virtual_memory().available
+ except (ImportError, AttributeError):
+ pass
+
+ if sys.platform.startswith('linux'):
+ info = {}
+ with open('/proc/meminfo', 'r') as f:
+ for line in f:
+ p = line.split()
+ info[p[0].strip(':').lower()] = int(p[1]) * 1024
+
+ if 'memavailable' in info:
+ # Linux >= 3.14
+ return info['memavailable']
+ else:
+ return info['memfree'] + info['cached']
+
+ return None
+
+
+def _no_tracing(func):
+ """
+ Decorator to temporarily turn off tracing for the duration of a test.
+ Needed in tests that check refcounting, otherwise the tracing itself
+ influences the refcounts
+ """
+ if not hasattr(sys, 'gettrace'):
+ return func
+ else:
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ original_trace = sys.gettrace()
+ try:
+ sys.settrace(None)
+ return func(*args, **kwargs)
+ finally:
+ sys.settrace(original_trace)
+ return wrapper
+
diff --git a/numpy/testing/print_coercion_tables.py b/numpy/testing/print_coercion_tables.py
index 72b22cee1..8024df128 100755
--- a/numpy/testing/print_coercion_tables.py
+++ b/numpy/testing/print_coercion_tables.py
@@ -1,13 +1,11 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
"""Prints type-coercion tables for the built-in NumPy types
"""
-from __future__ import division, absolute_import, print_function
-
import numpy as np
# Generic object that can be added, but doesn't do anything else
-class GenericObject(object):
+class GenericObject:
def __init__(self, v):
self.v = v
diff --git a/numpy/testing/setup.py b/numpy/testing/setup.py
index 7c3f2fbdf..f4970991c 100755
--- a/numpy/testing/setup.py
+++ b/numpy/testing/setup.py
@@ -1,6 +1,4 @@
-#!/usr/bin/env python
-from __future__ import division, print_function
-
+#!/usr/bin/env python3
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
diff --git a/numpy/testing/tests/test_decorators.py b/numpy/testing/tests/test_decorators.py
index c029bf90c..b60d6dfbc 100644
--- a/numpy/testing/tests/test_decorators.py
+++ b/numpy/testing/tests/test_decorators.py
@@ -2,8 +2,6 @@
Test the decorators from ``testing.decorators``.
"""
-from __future__ import division, absolute_import, print_function
-
import warnings
import pytest
@@ -23,7 +21,7 @@ else:
@pytest.mark.skipif(not HAVE_NOSE, reason="Needs nose")
-class TestNoseDecorators(object):
+class TestNoseDecorators:
# These tests are run in a class for simplicity while still
# getting a report on each, skipped or success.
@@ -108,8 +106,7 @@ class TestNoseDecorators(object):
def test_skip_generators_hardcoded(self):
@dec.knownfailureif(True, "This test is known to fail")
def g1(x):
- for i in range(x):
- yield i
+ yield from range(x)
try:
for j in g1(10):
@@ -121,8 +118,7 @@ class TestNoseDecorators(object):
@dec.knownfailureif(False, "This test is NOT known to fail")
def g2(x):
- for i in range(x):
- yield i
+ yield from range(x)
raise self.DidntSkipException('FAIL')
try:
@@ -139,8 +135,7 @@ class TestNoseDecorators(object):
@dec.knownfailureif(skip_tester, "This test is known to fail")
def g1(x):
- for i in range(x):
- yield i
+ yield from range(x)
try:
skip_flag = 'skip me!'
@@ -153,8 +148,7 @@ class TestNoseDecorators(object):
@dec.knownfailureif(skip_tester, "This test is NOT known to fail")
def g2(x):
- for i in range(x):
- yield i
+ yield from range(x)
raise self.DidntSkipException('FAIL')
try:
diff --git a/numpy/testing/tests/test_doctesting.py b/numpy/testing/tests/test_doctesting.py
index b77cd93e0..92c2156d8 100644
--- a/numpy/testing/tests/test_doctesting.py
+++ b/numpy/testing/tests/test_doctesting.py
@@ -1,8 +1,6 @@
""" Doctests for NumPy-specific nose/doctest modifications
"""
-from __future__ import division, absolute_import, print_function
-
#FIXME: None of these tests is run, because 'check' is not a recognized
# testing prefix.
diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py
index 44f93a693..b899e94f4 100644
--- a/numpy/testing/tests/test_utils.py
+++ b/numpy/testing/tests/test_utils.py
@@ -1,10 +1,7 @@
-from __future__ import division, absolute_import, print_function
-
import warnings
import sys
import os
import itertools
-import textwrap
import pytest
import weakref
@@ -20,7 +17,7 @@ from numpy.testing import (
from numpy.core.overrides import ARRAY_FUNCTION_ENABLED
-class _GenericTest(object):
+class _GenericTest:
def _test_equal(self, a, b):
self._assert_func(a, b)
@@ -90,6 +87,21 @@ class TestArrayEqual(_GenericTest):
for t in ['S1', 'U1']:
foo(t)
+ def test_0_ndim_array(self):
+ x = np.array(473963742225900817127911193656584771)
+ y = np.array(18535119325151578301457182298393896)
+ assert_raises(AssertionError, self._assert_func, x, y)
+
+ y = x
+ self._assert_func(x, y)
+
+ x = np.array(43)
+ y = np.array(10)
+ assert_raises(AssertionError, self._assert_func, x, y)
+
+ y = x
+ self._assert_func(x, y)
+
def test_generic_rank3(self):
"""Test rank 3 array for all dtypes."""
def foo(t):
@@ -196,7 +208,7 @@ class TestArrayEqual(_GenericTest):
self._test_not_equal(b, a)
-class TestBuildErrorMessage(object):
+class TestBuildErrorMessage:
def test_build_err_msg_defaults(self):
x = np.array([1.00001, 2.00002, 3.00003])
@@ -328,24 +340,6 @@ class TestEqual(TestArrayEqual):
self._assert_func(x, x)
self._test_not_equal(x, y)
- def test_error_message(self):
- with pytest.raises(AssertionError) as exc_info:
- self._assert_func(np.array([1, 2]), np.array([[1, 2]]))
- msg = str(exc_info.value)
- msg2 = msg.replace("shapes (2L,), (1L, 2L)", "shapes (2,), (1, 2)")
- msg_reference = textwrap.dedent("""\
-
- Arrays are not equal
-
- (shapes (2,), (1, 2) mismatch)
- x: array([1, 2])
- y: array([[1, 2]])""")
-
- try:
- assert_equal(msg, msg_reference)
- except AssertionError:
- assert_equal(msg2, msg_reference)
-
def test_object(self):
#gh-12942
import datetime
@@ -603,14 +597,14 @@ class TestAlmostEqual(_GenericTest):
self._assert_func(a, a)
-class TestApproxEqual(object):
+class TestApproxEqual:
def setup(self):
self._assert_func = assert_approx_equal
- def test_simple_arrays(self):
- x = np.array([1234.22])
- y = np.array([1234.23])
+ def test_simple_0d_arrays(self):
+ x = np.array(1234.22)
+ y = np.array(1234.23)
self._assert_func(x, y, significant=5)
self._assert_func(x, y, significant=6)
@@ -646,7 +640,7 @@ class TestApproxEqual(object):
assert_raises(AssertionError, lambda: self._assert_func(ainf, anan))
-class TestArrayAssertLess(object):
+class TestArrayAssertLess:
def setup(self):
self._assert_func = assert_array_less
@@ -756,7 +750,7 @@ class TestArrayAssertLess(object):
@pytest.mark.skip(reason="The raises decorator depends on Nose")
-class TestRaises(object):
+class TestRaises:
def setup(self):
class MyException(Exception):
@@ -790,7 +784,7 @@ class TestRaises(object):
raise AssertionError("should have raised an AssertionError")
-class TestWarns(object):
+class TestWarns:
def test_warn(self):
def f():
@@ -841,7 +835,7 @@ class TestWarns(object):
raise AssertionError("wrong warning caught by assert_warn")
-class TestAssertAllclose(object):
+class TestAssertAllclose:
def test_simple(self):
x = 1e-3
@@ -911,7 +905,7 @@ class TestAssertAllclose(object):
assert_('Max relative difference: 0.5' in msg)
-class TestArrayAlmostEqualNulp(object):
+class TestArrayAlmostEqualNulp:
def test_float64_pass(self):
# The number of units of least precision
@@ -1108,7 +1102,7 @@ class TestArrayAlmostEqualNulp(object):
xi, y + y*1j, nulp)
-class TestULP(object):
+class TestULP:
def test_equal(self):
x = np.random.randn(10)
@@ -1164,7 +1158,7 @@ class TestULP(object):
maxulp=maxulp))
-class TestStringEqual(object):
+class TestStringEqual:
def test_simple(self):
assert_string_equal("hello", "hello")
assert_string_equal("hello\nmultiline", "hello\nmultiline")
@@ -1226,7 +1220,7 @@ def test_warn_len_equal_call_scenarios():
# check that no assertion is uncaught
# parallel scenario -- no warning issued yet
- class mod(object):
+ class mod:
pass
mod_inst = mod()
@@ -1236,7 +1230,7 @@ def test_warn_len_equal_call_scenarios():
# serial test scenario -- the __warningregistry__
# attribute should be present
- class mod(object):
+ class mod:
def __init__(self):
self.__warningregistry__ = {'warning1':1,
'warning2':2}
@@ -1511,7 +1505,7 @@ def test_clear_and_catch_warnings_inherit():
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
-class TestAssertNoGcCycles(object):
+class TestAssertNoGcCycles:
""" Test assert_no_gc_cycles """
def test_passes(self):
def no_cycle():
@@ -1545,7 +1539,7 @@ class TestAssertNoGcCycles(object):
error, instead of hanging forever trying to clear it.
"""
- class ReferenceCycleInDel(object):
+ class ReferenceCycleInDel:
"""
An object that not only contains a reference cycle, but creates new
cycles whenever it's garbage-collected and its __del__ runs
diff --git a/numpy/testing/utils.py b/numpy/testing/utils.py
index 975f6ad5d..753258c13 100644
--- a/numpy/testing/utils.py
+++ b/numpy/testing/utils.py
@@ -3,8 +3,6 @@ Back compatibility utils module. It will import the appropriate
set of tools
"""
-from __future__ import division, absolute_import, print_function
-
import warnings
# 2018-04-04, numpy 1.15.0 ImportWarning
diff --git a/numpy/tests/test_ctypeslib.py b/numpy/tests/test_ctypeslib.py
index 521208c36..af3730df1 100644
--- a/numpy/tests/test_ctypeslib.py
+++ b/numpy/tests/test_ctypeslib.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import sys
import pytest
import weakref
@@ -37,7 +35,7 @@ else:
reason="ctypes not available in this python")
@pytest.mark.skipif(sys.platform == 'cygwin',
reason="Known to fail on cygwin")
-class TestLoadLibrary(object):
+class TestLoadLibrary:
def test_basic(self):
try:
# Should succeed
@@ -63,7 +61,7 @@ class TestLoadLibrary(object):
print(msg)
-class TestNdpointer(object):
+class TestNdpointer:
def test_dtype(self):
dt = np.intc
p = ndpointer(dtype=dt)
@@ -130,7 +128,7 @@ class TestNdpointer(object):
@pytest.mark.skipif(ctypes is None,
reason="ctypes not available on this python installation")
-class TestNdpointerCFunc(object):
+class TestNdpointerCFunc:
def test_arguments(self):
""" Test that arguments are coerced from arrays """
c_forward_pointer.restype = ctypes.c_void_p
@@ -186,7 +184,7 @@ class TestNdpointerCFunc(object):
@pytest.mark.skipif(ctypes is None,
reason="ctypes not available on this python installation")
-class TestAsArray(object):
+class TestAsArray:
def test_array(self):
from ctypes import c_int
@@ -277,7 +275,7 @@ class TestAsArray(object):
@pytest.mark.skipif(ctypes is None,
reason="ctypes not available on this python installation")
-class TestAsCtypesType(object):
+class TestAsCtypesType:
""" Test conversion from dtypes to ctypes types """
def test_scalar(self):
dt = np.dtype('<u2')
diff --git a/numpy/tests/test_matlib.py b/numpy/tests/test_matlib.py
index 38a7e39df..0e93c4848 100644
--- a/numpy/tests/test_matlib.py
+++ b/numpy/tests/test_matlib.py
@@ -1,13 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
-# As we are testing matrices, we ignore its PendingDeprecationWarnings
-try:
- import pytest
- pytestmark = pytest.mark.filterwarnings(
- 'ignore:the matrix subclass is not:PendingDeprecationWarning')
-except ImportError:
- pass
-
import numpy as np
import numpy.matlib
from numpy.testing import assert_array_equal, assert_
diff --git a/numpy/tests/test_numpy_version.py b/numpy/tests/test_numpy_version.py
index 7fac8fd22..916ab9383 100644
--- a/numpy/tests/test_numpy_version.py
+++ b/numpy/tests/test_numpy_version.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import re
import numpy as np
diff --git a/numpy/tests/test_public_api.py b/numpy/tests/test_public_api.py
index e3621c0fd..fb7ec5d83 100644
--- a/numpy/tests/test_public_api.py
+++ b/numpy/tests/test_public_api.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import sys
import subprocess
import pkgutil
@@ -30,9 +28,6 @@ def check_dir(module, module_name=None):
return results
-@pytest.mark.skipif(
- sys.version_info[0] < 3,
- reason="NumPy exposes slightly different functions on Python 2")
def test_numpy_namespace():
# None of these objects are publicly documented to be part of the main
# NumPy namespace (some are useful though, others need to be cleaned up)
@@ -50,7 +45,6 @@ def test_numpy_namespace():
'fastCopyAndTranspose': 'numpy.core._multiarray_umath._fastCopyAndTranspose',
'get_array_wrap': 'numpy.lib.shape_base.get_array_wrap',
'get_include': 'numpy.lib.utils.get_include',
- 'int_asbuffer': 'numpy.core._multiarray_umath.int_asbuffer',
'mafromtxt': 'numpy.lib.npyio.mafromtxt',
'ndfromtxt': 'numpy.lib.npyio.ndfromtxt',
'recfromcsv': 'numpy.lib.npyio.recfromcsv',
@@ -100,6 +94,12 @@ def test_import_lazy_import(name):
assert name in dir(np)
+def test_dir_testing():
+ """Assert that output of dir has only one "testing/tester"
+ attribute without duplicate"""
+ assert len(dir(np)) == len(set(dir(np)))
+
+
def test_numpy_linalg():
bad_results = check_dir(np.linalg)
assert bad_results == {}
@@ -228,7 +228,6 @@ PRIVATE_BUT_PRESENT_MODULES = ['numpy.' + s for s in [
"distutils.command.install_data",
"distutils.command.install_headers",
"distutils.command.sdist",
- "distutils.compat",
"distutils.conv_template",
"distutils.core",
"distutils.extension",
@@ -298,15 +297,8 @@ PRIVATE_BUT_PRESENT_MODULES = ['numpy.' + s for s in [
"ma.timer_comparison",
"matrixlib",
"matrixlib.defmatrix",
- "random.bit_generator",
- "random.bounded_integers",
- "random.common",
- "random.generator",
- "random.mt19937",
"random.mtrand",
- "random.pcg64",
- "random.philox",
- "random.sfc64",
+ "random.bit_generator",
"testing.print_coercion_tables",
"testing.utils",
]]
@@ -394,7 +386,7 @@ SKIP_LIST_2 = [
'numpy.matlib.fft',
'numpy.matlib.random',
'numpy.matlib.ctypeslib',
- 'numpy.matlib.ma'
+ 'numpy.matlib.ma',
]
diff --git a/numpy/tests/test_reloading.py b/numpy/tests/test_reloading.py
index e378d1463..860832be8 100644
--- a/numpy/tests/test_reloading.py
+++ b/numpy/tests/test_reloading.py
@@ -1,14 +1,7 @@
-from __future__ import division, absolute_import, print_function
-
-import sys
-
from numpy.testing import assert_raises, assert_, assert_equal
from numpy.compat import pickle
-if sys.version_info[:2] >= (3, 4):
- from importlib import reload
-else:
- from imp import reload
+from importlib import reload
def test_numpy_reloading():
# gh-7844. Also check that relevant globals retain their identity.
diff --git a/numpy/tests/test_scripts.py b/numpy/tests/test_scripts.py
index e42dc25f9..a0f2ba70a 100644
--- a/numpy/tests/test_scripts.py
+++ b/numpy/tests/test_scripts.py
@@ -2,8 +2,6 @@
Test that we can run executable scripts that have been installed with numpy.
"""
-from __future__ import division, print_function, absolute_import
-
import sys
import os
import pytest
@@ -11,8 +9,7 @@ from os.path import join as pathjoin, isfile, dirname
import subprocess
import numpy as np
-from numpy.compat.py3k import basestring
-from numpy.testing import assert_, assert_equal
+from numpy.testing import assert_equal
is_inplace = isfile(pathjoin(dirname(np.__file__), '..', 'setup.py'))
diff --git a/numpy/tests/test_warnings.py b/numpy/tests/test_warnings.py
index f5560a099..d7a6d880c 100644
--- a/numpy/tests/test_warnings.py
+++ b/numpy/tests/test_warnings.py
@@ -2,77 +2,73 @@
Tests which scan for certain occurrences in the code, they may not find
all of these occurrences but should catch almost all.
"""
-from __future__ import division, absolute_import, print_function
-
-import sys
import pytest
-if sys.version_info >= (3, 4):
- from pathlib import Path
- import ast
- import tokenize
- import numpy
-
- class ParseCall(ast.NodeVisitor):
- def __init__(self):
- self.ls = []
-
- def visit_Attribute(self, node):
- ast.NodeVisitor.generic_visit(self, node)
- self.ls.append(node.attr)
-
- def visit_Name(self, node):
- self.ls.append(node.id)
+from pathlib import Path
+import ast
+import tokenize
+import numpy
+class ParseCall(ast.NodeVisitor):
+ def __init__(self):
+ self.ls = []
- class FindFuncs(ast.NodeVisitor):
- def __init__(self, filename):
- super().__init__()
- self.__filename = filename
+ def visit_Attribute(self, node):
+ ast.NodeVisitor.generic_visit(self, node)
+ self.ls.append(node.attr)
- def visit_Call(self, node):
- p = ParseCall()
- p.visit(node.func)
- ast.NodeVisitor.generic_visit(self, node)
+ def visit_Name(self, node):
+ self.ls.append(node.id)
- if p.ls[-1] == 'simplefilter' or p.ls[-1] == 'filterwarnings':
- if node.args[0].s == "ignore":
- raise AssertionError(
- "ignore filter should not be used; found in "
- "{} on line {}".format(self.__filename, node.lineno))
- if p.ls[-1] == 'warn' and (
- len(p.ls) == 1 or p.ls[-2] == 'warnings'):
+class FindFuncs(ast.NodeVisitor):
+ def __init__(self, filename):
+ super().__init__()
+ self.__filename = filename
- if "testing/tests/test_warnings.py" == self.__filename:
- # This file
- return
+ def visit_Call(self, node):
+ p = ParseCall()
+ p.visit(node.func)
+ ast.NodeVisitor.generic_visit(self, node)
- # See if stacklevel exists:
- if len(node.args) == 3:
- return
- args = {kw.arg for kw in node.keywords}
- if "stacklevel" in args:
- return
+ if p.ls[-1] == 'simplefilter' or p.ls[-1] == 'filterwarnings':
+ if node.args[0].s == "ignore":
raise AssertionError(
"warnings should have an appropriate stacklevel; found in "
"{} on line {}".format(self.__filename, node.lineno))
+ if p.ls[-1] == 'warn' and (
+ len(p.ls) == 1 or p.ls[-2] == 'warnings'):
+
+ if "testing/tests/test_warnings.py" == self.__filename:
+ # This file
+ return
+
+ # See if stacklevel exists:
+ if len(node.args) == 3:
+ return
+ args = {kw.arg for kw in node.keywords}
+ if "stacklevel" in args:
+ return
+ raise AssertionError(
+ "warnings should have an appropriate stacklevel; found in "
+ "{} on line {}".format(self.__filename, node.lineno))
+
- @pytest.mark.slow
- def test_warning_calls():
- # combined "ignore" and stacklevel error
- base = Path(numpy.__file__).parent
+@pytest.mark.slow
+def test_warning_calls():
+ # combined "ignore" and stacklevel error
+ base = Path(numpy.__file__).parent
- for path in base.rglob("*.py"):
- if base / "testing" in path.parents:
- continue
- if path == base / "__init__.py":
- continue
- if path == base / "random" / "__init__.py":
- continue
- # use tokenize to auto-detect encoding on systems where no
- # default encoding is defined (e.g. LANG='C')
- with tokenize.open(str(path)) as file:
- tree = ast.parse(file.read())
- FindFuncs(path).visit(tree)
+ for path in base.rglob("*.py"):
+ if base / "testing" in path.parents:
+ continue
+ if path == base / "__init__.py":
+ continue
+ if path == base / "random" / "__init__.py":
+ continue
+ # use tokenize to auto-detect encoding on systems where no
+ # default encoding is defined (e.g. LANG='C')
+ with tokenize.open(str(path)) as file:
+ tree = ast.parse(file.read())
+ FindFuncs(path).visit(tree)