summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.appveyor.yml6
-rw-r--r--.circleci/config.yml10
-rw-r--r--.dependabot/config.yml5
-rw-r--r--.github/CONTRIBUTING.md6
-rw-r--r--.lgtm.yml3
-rw-r--r--.mailmap9
-rw-r--r--.travis.yml8
-rw-r--r--INSTALL.rst.txt7
-rw-r--r--MANIFEST.in8
-rw-r--r--azure-pipelines.yml22
-rw-r--r--changelog/13829.enhancement.rst6
-rw-r--r--doc/DISTUTILS.rst.txt2
-rw-r--r--doc/HOWTO_RELEASE.rst.txt77
-rw-r--r--doc/Makefile56
-rw-r--r--doc/Py3K.rst.txt40
-rw-r--r--doc/RELEASE_WALKTHROUGH.rst.txt44
-rw-r--r--doc/changelog/1.16.5-changelog.rst54
-rw-r--r--doc/changelog/1.17.0-changelog.rst42
-rw-r--r--doc/changelog/1.17.1-changelog.rst55
-rw-r--r--doc/changelog/1.17.2-changelog.rst28
-rw-r--r--doc/neps/index.rst.tmpl2
-rw-r--r--doc/neps/nep-0000.rst2
-rw-r--r--doc/neps/nep-0019-rng-policy.rst2
-rw-r--r--doc/neps/nep-0021-advanced-indexing.rst2
-rw-r--r--doc/neps/nep-0024-missing-data-2.rst2
-rw-r--r--doc/neps/nep-0028-website-redesign.rst334
-rw-r--r--doc/neps/nep-0029-deprecation_policy.rst314
-rw-r--r--doc/neps/nep-0030-duck-array-protocol.rst183
-rw-r--r--doc/neps/nep-0032-remove-financial-functions.rst214
-rw-r--r--doc/neps/nep-template.rst7
-rw-r--r--doc/records.rst.txt6
-rw-r--r--doc/release/1.18.0-notes.rst43
-rw-r--r--doc/release/time_based_proposal.rst129
-rw-r--r--doc/release/upcoming_changes/10151.improvement.rst9
-rw-r--r--doc/release/upcoming_changes/12284.new_feature.rst5
-rw-r--r--doc/release/upcoming_changes/13605.deprecation.rst9
-rw-r--r--doc/release/upcoming_changes/13610.improvement.rst5
-rw-r--r--doc/release/upcoming_changes/13899.change.rst4
-rw-r--r--doc/release/upcoming_changes/14036.deprecation.rst4
-rw-r--r--doc/release/upcoming_changes/14036.expired.rst2
-rw-r--r--doc/release/upcoming_changes/14039.expired.rst2
-rw-r--r--doc/release/upcoming_changes/14100.expired.rst3
-rw-r--r--doc/release/upcoming_changes/14181.deprecation.rst3
-rw-r--r--doc/release/upcoming_changes/14248.change.rst10
-rw-r--r--doc/release/upcoming_changes/14255.improvement.rst4
-rw-r--r--doc/release/upcoming_changes/14256.expired.rst3
-rw-r--r--doc/release/upcoming_changes/14259.expired.rst6
-rw-r--r--doc/release/upcoming_changes/14325.expired.rst2
-rw-r--r--doc/release/upcoming_changes/14335.expired.rst2
-rw-r--r--doc/release/upcoming_changes/14393.c_api.rst5
-rw-r--r--doc/release/upcoming_changes/14464.improvement.rst6
-rw-r--r--doc/release/upcoming_changes/14498.change.rst7
-rw-r--r--doc/release/upcoming_changes/14501.improvement.rst6
-rw-r--r--doc/release/upcoming_changes/14510.compatibility.rst12
-rw-r--r--doc/release/upcoming_changes/14518.change.rst18
-rw-r--r--doc/release/upcoming_changes/14567.expired.rst5
-rw-r--r--doc/release/upcoming_changes/14583.expired.rst2
-rw-r--r--doc/release/upcoming_changes/README.rst55
-rw-r--r--doc/release/upcoming_changes/template.rst38
-rw-r--r--doc/source/_static/numpy_logo.pngbin0 -> 6103 bytes
-rw-r--r--doc/source/_templates/autosummary/base.rst14
-rw-r--r--doc/source/_templates/indexsidebar.html1
-rw-r--r--doc/source/_templates/layout.html10
-rw-r--r--doc/source/conf.py31
-rw-r--r--doc/source/dev/development_environment.rst29
-rw-r--r--doc/source/dev/development_workflow.rst38
-rw-r--r--doc/source/dev/governance/people.rst2
-rw-r--r--doc/source/dev/index.rst27
-rw-r--r--doc/source/docs/howto_build_docs.rst2
-rw-r--r--doc/source/reference/arrays.classes.rst2
-rw-r--r--doc/source/reference/arrays.datetime.rst161
-rw-r--r--doc/source/reference/arrays.nditer.rst81
-rw-r--r--doc/source/reference/c-api/array.rst35
-rw-r--r--doc/source/reference/c-api/ufunc.rst6
-rw-r--r--doc/source/reference/random/bit_generators/mt19937.rst6
-rw-r--r--doc/source/reference/random/bit_generators/pcg64.rst4
-rw-r--r--doc/source/reference/random/bit_generators/philox.rst4
-rw-r--r--doc/source/reference/random/bit_generators/sfc64.rst4
-rw-r--r--doc/source/reference/random/entropy.rst6
-rw-r--r--doc/source/reference/random/generator.rst84
-rw-r--r--doc/source/reference/random/index.rst8
-rw-r--r--doc/source/reference/random/legacy.rst18
-rw-r--r--doc/source/reference/random/new-or-different.rst3
-rw-r--r--doc/source/reference/routines.ma.rst11
-rw-r--r--doc/source/reference/routines.testing.rst10
-rw-r--r--doc/source/reference/ufuncs.rst69
-rw-r--r--doc/source/release.rst104
-rw-r--r--doc/source/release/1.10.0-notes.rst (renamed from doc/release/1.10.0-notes.rst)0
-rw-r--r--doc/source/release/1.10.1-notes.rst (renamed from doc/release/1.10.1-notes.rst)0
-rw-r--r--doc/source/release/1.10.2-notes.rst (renamed from doc/release/1.10.2-notes.rst)0
-rw-r--r--doc/source/release/1.10.3-notes.rst (renamed from doc/release/1.10.3-notes.rst)0
-rw-r--r--doc/source/release/1.10.4-notes.rst (renamed from doc/release/1.10.4-notes.rst)0
-rw-r--r--doc/source/release/1.11.0-notes.rst (renamed from doc/release/1.11.0-notes.rst)0
-rw-r--r--doc/source/release/1.11.1-notes.rst (renamed from doc/release/1.11.1-notes.rst)0
-rw-r--r--doc/source/release/1.11.2-notes.rst (renamed from doc/release/1.11.2-notes.rst)0
-rw-r--r--doc/source/release/1.11.3-notes.rst (renamed from doc/release/1.11.3-notes.rst)0
-rw-r--r--doc/source/release/1.12.0-notes.rst (renamed from doc/release/1.12.0-notes.rst)0
-rw-r--r--doc/source/release/1.12.1-notes.rst (renamed from doc/release/1.12.1-notes.rst)0
-rw-r--r--doc/source/release/1.13.0-notes.rst (renamed from doc/release/1.13.0-notes.rst)0
-rw-r--r--doc/source/release/1.13.1-notes.rst (renamed from doc/release/1.13.1-notes.rst)0
-rw-r--r--doc/source/release/1.13.2-notes.rst (renamed from doc/release/1.13.2-notes.rst)0
-rw-r--r--doc/source/release/1.13.3-notes.rst (renamed from doc/release/1.13.3-notes.rst)0
-rw-r--r--doc/source/release/1.14.0-notes.rst (renamed from doc/release/1.14.0-notes.rst)0
-rw-r--r--doc/source/release/1.14.1-notes.rst (renamed from doc/release/1.14.1-notes.rst)0
-rw-r--r--doc/source/release/1.14.2-notes.rst (renamed from doc/release/1.14.2-notes.rst)0
-rw-r--r--doc/source/release/1.14.3-notes.rst (renamed from doc/release/1.14.3-notes.rst)0
-rw-r--r--doc/source/release/1.14.4-notes.rst (renamed from doc/release/1.14.4-notes.rst)0
-rw-r--r--doc/source/release/1.14.5-notes.rst (renamed from doc/release/1.14.5-notes.rst)0
-rw-r--r--doc/source/release/1.14.6-notes.rst (renamed from doc/release/1.14.6-notes.rst)0
-rw-r--r--doc/source/release/1.15.0-notes.rst (renamed from doc/release/1.15.0-notes.rst)0
-rw-r--r--doc/source/release/1.15.1-notes.rst (renamed from doc/release/1.15.1-notes.rst)0
-rw-r--r--doc/source/release/1.15.2-notes.rst (renamed from doc/release/1.15.2-notes.rst)0
-rw-r--r--doc/source/release/1.15.3-notes.rst (renamed from doc/release/1.15.3-notes.rst)0
-rw-r--r--doc/source/release/1.15.4-notes.rst (renamed from doc/release/1.15.4-notes.rst)0
-rw-r--r--doc/source/release/1.16.0-notes.rst (renamed from doc/release/1.16.0-notes.rst)0
-rw-r--r--doc/source/release/1.16.1-notes.rst (renamed from doc/release/1.16.1-notes.rst)0
-rw-r--r--doc/source/release/1.16.2-notes.rst (renamed from doc/release/1.16.2-notes.rst)0
-rw-r--r--doc/source/release/1.16.3-notes.rst (renamed from doc/release/1.16.3-notes.rst)0
-rw-r--r--doc/source/release/1.16.4-notes.rst (renamed from doc/release/1.16.4-notes.rst)0
-rw-r--r--doc/source/release/1.16.5-notes.rst68
-rw-r--r--doc/source/release/1.17.0-notes.rst (renamed from doc/release/1.17.0-notes.rst)279
-rw-r--r--doc/source/release/1.17.1-notes.rst73
-rw-r--r--doc/source/release/1.17.2-notes.rst49
-rw-r--r--doc/source/release/1.18.0-notes.rst8
-rw-r--r--doc/source/release/1.3.0-notes.rst (renamed from doc/release/1.3.0-notes.rst)0
-rw-r--r--doc/source/release/1.4.0-notes.rst (renamed from doc/release/1.4.0-notes.rst)0
-rw-r--r--doc/source/release/1.5.0-notes.rst (renamed from doc/release/1.5.0-notes.rst)0
-rw-r--r--doc/source/release/1.6.0-notes.rst (renamed from doc/release/1.6.0-notes.rst)0
-rw-r--r--doc/source/release/1.6.1-notes.rst (renamed from doc/release/1.6.1-notes.rst)0
-rw-r--r--doc/source/release/1.6.2-notes.rst (renamed from doc/release/1.6.2-notes.rst)0
-rw-r--r--doc/source/release/1.7.0-notes.rst (renamed from doc/release/1.7.0-notes.rst)0
-rw-r--r--doc/source/release/1.7.1-notes.rst (renamed from doc/release/1.7.1-notes.rst)0
-rw-r--r--doc/source/release/1.7.2-notes.rst (renamed from doc/release/1.7.2-notes.rst)0
-rw-r--r--doc/source/release/1.8.0-notes.rst (renamed from doc/release/1.8.0-notes.rst)0
-rw-r--r--doc/source/release/1.8.1-notes.rst (renamed from doc/release/1.8.1-notes.rst)0
-rw-r--r--doc/source/release/1.8.2-notes.rst (renamed from doc/release/1.8.2-notes.rst)0
-rw-r--r--doc/source/release/1.9.0-notes.rst (renamed from doc/release/1.9.0-notes.rst)0
-rw-r--r--doc/source/release/1.9.1-notes.rst (renamed from doc/release/1.9.1-notes.rst)0
-rw-r--r--doc/source/release/1.9.2-notes.rst (renamed from doc/release/1.9.2-notes.rst)0
-rw-r--r--doc/source/release/template.rst (renamed from doc/release/template.rst)2
-rw-r--r--doc/source/user/basics.io.genfromtxt.rst14
-rw-r--r--doc/source/user/building.rst11
-rw-r--r--doc/source/user/c-info.beyond-basics.rst7
-rw-r--r--doc/source/user/c-info.how-to-extend.rst8
-rw-r--r--doc/source/user/c-info.python-as-glue.rst10
-rw-r--r--doc/source/user/quickstart.rst8
-rw-r--r--numpy/__init__.pxd978
-rw-r--r--numpy/__init__.py43
-rw-r--r--numpy/_pytesttester.py11
-rw-r--r--numpy/core/__init__.py9
-rw-r--r--numpy/core/_add_newdocs.py18
-rw-r--r--numpy/core/_dtype.py35
-rw-r--r--numpy/core/_exceptions.py53
-rw-r--r--numpy/core/_internal.py2
-rw-r--r--numpy/core/arrayprint.py51
-rw-r--r--numpy/core/code_generators/genapi.py3
-rw-r--r--numpy/core/code_generators/generate_umath.py18
-rw-r--r--numpy/core/fromnumeric.py85
-rw-r--r--numpy/core/include/numpy/ndarraytypes.h6
-rw-r--r--numpy/core/include/numpy/npy_common.h8
-rw-r--r--numpy/core/include/numpy/npy_math.h115
-rw-r--r--numpy/core/info.py87
-rw-r--r--numpy/core/numeric.py53
-rw-r--r--numpy/core/overrides.py20
-rw-r--r--numpy/core/records.py18
-rw-r--r--numpy/core/setup.py37
-rw-r--r--numpy/core/setup_common.py55
-rw-r--r--numpy/core/shape_base.py15
-rw-r--r--numpy/core/src/common/npy_partition.h.src3
-rw-r--r--numpy/core/src/multiarray/_multiarray_tests.c.src65
-rw-r--r--numpy/core/src/multiarray/alloc.c16
-rw-r--r--numpy/core/src/multiarray/arrayobject.c52
-rw-r--r--numpy/core/src/multiarray/compiled_base.c31
-rw-r--r--numpy/core/src/multiarray/convert.c29
-rw-r--r--numpy/core/src/multiarray/ctors.c234
-rw-r--r--numpy/core/src/multiarray/datetime.c23
-rw-r--r--numpy/core/src/multiarray/datetime_busday.c12
-rw-r--r--numpy/core/src/multiarray/descriptor.c9
-rw-r--r--numpy/core/src/multiarray/dtype_transfer.c2
-rw-r--r--numpy/core/src/multiarray/getset.c2
-rw-r--r--numpy/core/src/multiarray/item_selection.c39
-rw-r--r--numpy/core/src/multiarray/item_selection.h4
-rw-r--r--numpy/core/src/multiarray/iterators.c36
-rw-r--r--numpy/core/src/multiarray/iterators.h3
-rw-r--r--numpy/core/src/multiarray/mapping.c4
-rw-r--r--numpy/core/src/multiarray/methods.c10
-rw-r--r--numpy/core/src/multiarray/multiarraymodule.c21
-rw-r--r--numpy/core/src/multiarray/nditer_api.c5
-rw-r--r--numpy/core/src/multiarray/nditer_constr.c24
-rw-r--r--numpy/core/src/multiarray/nditer_pywrap.c7
-rw-r--r--numpy/core/src/multiarray/number.c3
-rw-r--r--numpy/core/src/multiarray/refcount.c32
-rw-r--r--numpy/core/src/multiarray/scalartypes.c.src30
-rw-r--r--numpy/core/src/multiarray/shape.c16
-rw-r--r--numpy/core/src/npymath/npy_math_complex.c.src36
-rw-r--r--numpy/core/src/npymath/npy_math_internal.h.src41
-rw-r--r--numpy/core/src/npysort/radixsort.c.src4
-rw-r--r--numpy/core/src/umath/_rational_tests.c.src4
-rw-r--r--numpy/core/src/umath/cpuid.c22
-rw-r--r--numpy/core/src/umath/loops.c.src82
-rw-r--r--numpy/core/src/umath/loops.h.src4
-rw-r--r--numpy/core/src/umath/matmul.c.src47
-rw-r--r--numpy/core/src/umath/reduction.c8
-rw-r--r--numpy/core/src/umath/scalarmath.c.src60
-rw-r--r--numpy/core/src/umath/simd.inc.src379
-rw-r--r--numpy/core/src/umath/ufunc_object.c18
-rw-r--r--numpy/core/src/umath/ufunc_type_resolution.c3
-rw-r--r--numpy/core/tests/data/umath-validation-set-README15
-rw-r--r--numpy/core/tests/data/umath-validation-set-cos707
-rw-r--r--numpy/core/tests/data/umath-validation-set-exp135
-rw-r--r--numpy/core/tests/data/umath-validation-set-log118
-rw-r--r--numpy/core/tests/data/umath-validation-set-sin707
-rw-r--r--numpy/core/tests/test__exceptions.py42
-rw-r--r--numpy/core/tests/test_arrayprint.py11
-rw-r--r--numpy/core/tests/test_deprecations.py90
-rw-r--r--numpy/core/tests/test_dtype.py32
-rw-r--r--numpy/core/tests/test_indexing.py13
-rw-r--r--numpy/core/tests/test_longdouble.py38
-rw-r--r--numpy/core/tests/test_multiarray.py69
-rw-r--r--numpy/core/tests/test_numeric.py29
-rw-r--r--numpy/core/tests/test_numerictypes.py29
-rw-r--r--numpy/core/tests/test_records.py42
-rw-r--r--numpy/core/tests/test_regression.py41
-rw-r--r--numpy/core/tests/test_scalarmath.py28
-rw-r--r--numpy/core/tests/test_ufunc.py15
-rw-r--r--numpy/core/tests/test_umath.py47
-rw-r--r--numpy/core/tests/test_umath_accuracy.py54
-rw-r--r--numpy/ctypeslib.py17
-rw-r--r--numpy/distutils/__init__.py27
-rw-r--r--numpy/distutils/__version__.py6
-rw-r--r--numpy/distutils/ccompiler.py13
-rw-r--r--numpy/distutils/command/build.py13
-rw-r--r--numpy/distutils/command/build_clib.py14
-rw-r--r--numpy/distutils/command/build_ext.py15
-rw-r--r--numpy/distutils/command/build_src.py17
-rw-r--r--numpy/distutils/fcompiler/environment.py12
-rw-r--r--numpy/distutils/info.py6
-rw-r--r--numpy/distutils/log.py2
-rw-r--r--numpy/distutils/misc_util.py52
-rw-r--r--numpy/distutils/system_info.py14
-rw-r--r--numpy/distutils/tests/test_fcompiler.py34
-rw-r--r--numpy/doc/broadcasting.py3
-rw-r--r--numpy/doc/dispatch.py6
-rw-r--r--numpy/doc/subclassing.py3
-rw-r--r--numpy/f2py/cfuncs.py2
-rw-r--r--numpy/f2py/info.py6
-rwxr-xr-x[-rw-r--r--]numpy/f2py/rules.py9
-rw-r--r--numpy/f2py/setup.py2
-rw-r--r--numpy/f2py/src/fortranobject.c7
-rw-r--r--numpy/fft/README.md5
-rw-r--r--numpy/fft/__init__.py190
-rw-r--r--numpy/fft/_pocketfft.c (renamed from numpy/fft/pocketfft.c)8
-rw-r--r--numpy/fft/_pocketfft.py (renamed from numpy/fft/pocketfft.py)35
-rw-r--r--numpy/fft/info.py187
-rw-r--r--numpy/fft/setup.py4
-rw-r--r--numpy/fft/tests/test_pocketfft.py130
-rw-r--r--numpy/lib/__init__.py26
-rw-r--r--numpy/lib/_iotools.py13
-rw-r--r--numpy/lib/arraypad.py93
-rw-r--r--numpy/lib/arraysetops.py6
-rw-r--r--numpy/lib/financial.py33
-rw-r--r--numpy/lib/format.py3
-rw-r--r--numpy/lib/function_base.py37
-rw-r--r--numpy/lib/info.py160
-rw-r--r--numpy/lib/mixins.py4
-rw-r--r--numpy/lib/nanfunctions.py2
-rw-r--r--numpy/lib/npyio.py28
-rw-r--r--numpy/lib/recfunctions.py78
-rw-r--r--numpy/lib/shape_base.py7
-rw-r--r--numpy/lib/tests/test_arraypad.py56
-rw-r--r--numpy/lib/tests/test_arraysetops.py7
-rw-r--r--numpy/lib/tests/test_financial.py6
-rw-r--r--numpy/lib/tests/test_function_base.py24
-rw-r--r--numpy/lib/tests/test_index_tricks.py18
-rw-r--r--numpy/lib/tests/test_io.py11
-rw-r--r--numpy/lib/tests/test_recfunctions.py70
-rw-r--r--numpy/lib/twodim_base.py2
-rw-r--r--numpy/lib/type_check.py10
-rw-r--r--numpy/lib/utils.py94
-rw-r--r--numpy/linalg/__init__.py110
-rw-r--r--numpy/linalg/info.py37
-rw-r--r--numpy/linalg/linalg.py20
-rw-r--r--numpy/linalg/umath_linalg.c.src2
-rw-r--r--numpy/ma/core.py113
-rw-r--r--numpy/ma/extras.py7
-rw-r--r--numpy/ma/mrecords.py2
-rw-r--r--numpy/ma/tests/test_core.py7
-rw-r--r--numpy/ma/version.py14
-rw-r--r--numpy/matlib.py2
-rw-r--r--numpy/polynomial/polyutils.py10
-rw-r--r--numpy/random/__init__.py6
-rw-r--r--numpy/random/_pickle.py6
-rw-r--r--numpy/random/bit_generator.pxd6
-rw-r--r--numpy/random/bit_generator.pyx4
-rw-r--r--numpy/random/common.pxd2
-rw-r--r--numpy/random/common.pyx2
-rw-r--r--numpy/random/entropy.pyx155
-rw-r--r--numpy/random/generator.pyx54
-rw-r--r--numpy/random/info.py5
-rw-r--r--numpy/random/legacy_distributions.pxd2
-rw-r--r--numpy/random/mt19937.pyx7
-rw-r--r--numpy/random/mtrand.pyx25
-rw-r--r--numpy/random/setup.py18
-rw-r--r--numpy/random/src/bitgen.h (renamed from numpy/core/include/numpy/random/bitgen.h)0
-rw-r--r--numpy/random/src/distributions/distributions.c8
-rw-r--r--numpy/random/src/distributions/distributions.h25
-rw-r--r--numpy/random/src/distributions/random_hypergeometric.c2
-rw-r--r--numpy/random/src/entropy/entropy.c114
-rw-r--r--numpy/random/src/entropy/entropy.h14
-rw-r--r--numpy/random/src/legacy/legacy-distributions.c31
-rw-r--r--numpy/random/src/legacy/legacy-distributions.h7
-rw-r--r--numpy/random/src/philox/philox.h2
-rw-r--r--numpy/random/src/sfc64/sfc64.h2
-rw-r--r--numpy/random/tests/test_generator_mt19937.py58
-rw-r--r--numpy/random/tests/test_randomstate.py15
-rw-r--r--numpy/random/tests/test_randomstate_regression.py27
-rw-r--r--numpy/random/tests/test_smoke.py22
-rw-r--r--numpy/testing/_private/parameterized.py19
-rw-r--r--numpy/testing/_private/utils.py42
-rw-r--r--numpy/testing/decorators.py15
-rw-r--r--numpy/testing/noseclasses.py14
-rw-r--r--numpy/testing/nosetester.py19
-rwxr-xr-xnumpy/testing/print_coercion_tables.py40
-rw-r--r--numpy/testing/tests/test_utils.py40
-rw-r--r--numpy/testing/utils.py7
-rw-r--r--numpy/tests/test_public_api.py412
-rw-r--r--pavement.py3
-rw-r--r--pyproject.toml71
-rwxr-xr-xruntests.py14
-rwxr-xr-xsetup.py16
-rw-r--r--shippable.yml10
-rw-r--r--test_requirements.txt7
-rw-r--r--tools/ci/appveyor/requirements.txt6
-rwxr-xr-xtools/ci/test_all_newsfragments_used.py16
-rwxr-xr-xtools/cythonize.py27
-rw-r--r--tools/npy_tempita/compat3.py2
-rw-r--r--tools/openblas_support.py8
-rwxr-xr-xtools/pypy-test.sh4
-rwxr-xr-xtools/swig/test/testFarray.py2
-rwxr-xr-xtools/travis-before-install.sh8
-rwxr-xr-xtools/travis-test.sh25
-rw-r--r--tox.ini3
342 files changed, 8782 insertions, 3282 deletions
diff --git a/.appveyor.yml b/.appveyor.yml
index 079496d93..c8c1795c1 100644
--- a/.appveyor.yml
+++ b/.appveyor.yml
@@ -34,13 +34,11 @@ environment:
PYTHON_VERSION: 3.6
PYTHON_ARCH: 64
TEST_MODE: full
- INSTALL_PICKLE5: 1
- PYTHON: C:\Python37-x64
PYTHON_VERSION: 3.7
PYTHON_ARCH: 64
TEST_MODE: full
- INSTALL_PICKLE5: 1
init:
- "ECHO %PYTHON% %PYTHON_VERSION% %PYTHON_ARCH%"
@@ -98,10 +96,8 @@ install:
# Upgrade to the latest pip.
- 'python -m pip install -U pip setuptools wheel'
- - if [%INSTALL_PICKLE5%]==[1] echo pickle5 >> tools/ci/appveyor/requirements.txt
-
# Install the numpy test dependencies.
- - 'pip install -U --timeout 5 --retries 2 -r tools/ci/appveyor/requirements.txt'
+ - 'pip install -U --timeout 5 --retries 2 -r test_requirements.txt'
build_script:
# Here, we add MinGW to the path to be able to link an OpenBLAS.dll
diff --git a/.circleci/config.yml b/.circleci/config.yml
index 6b4ab812f..772c3fbfd 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -21,7 +21,7 @@ jobs:
python3 -m venv venv
ln -s $(which python3) venv/bin/python3.6
. venv/bin/activate
- pip install cython sphinx==1.8.5 matplotlib ipython
+ pip install cython sphinx==2.2.0 matplotlib ipython
sudo apt-get update
sudo apt-get install -y graphviz texlive-fonts-recommended texlive-latex-recommended texlive-latex-extra texlive-generic-extra latexmk texlive-xetex
@@ -35,6 +35,14 @@ jobs:
pip install scipy
- run:
+ name: create release notes
+ command: |
+ . venv/bin/activate
+ pip install git+https://github.com/hawkowl/towncrier.git@master
+ VERSION=$(python -c "import setup; print(setup.VERSION)")
+ towncrier --version $VERSION --yes
+ ./tools/ci/test_all_newsfragments_used.py
+ - run:
name: build devdocs
command: |
. venv/bin/activate
diff --git a/.dependabot/config.yml b/.dependabot/config.yml
new file mode 100644
index 000000000..4d8d6f01e
--- /dev/null
+++ b/.dependabot/config.yml
@@ -0,0 +1,5 @@
+version: 1
+update_configs:
+ - package_manager: "python"
+ directory: "/"
+ update_schedule: "weekly"
diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
index 01d9a537e..22113b913 100644
--- a/.github/CONTRIBUTING.md
+++ b/.github/CONTRIBUTING.md
@@ -16,12 +16,12 @@ Thanks for your interest in contributing code to numpy!
+ If this is your first time contributing to a project on GitHub, please read
through our
-[guide to contributing to numpy](https://docs.scipy.org/doc/numpy/dev/index.html)
+[guide to contributing to numpy](https://numpy.org/devdocs/dev/index.html)
+ If you have contributed to other projects on GitHub you can go straight to our
-[development workflow](https://docs.scipy.org/doc/numpy/dev/gitwash/development_workflow.html)
+[development workflow](https://numpy.org/devdocs/dev/development_workflow.html)
Either way, please be sure to follow our
-[convention for commit messages](https://docs.scipy.org/doc/numpy/dev/gitwash/development_workflow.html#writing-the-commit-message).
+[convention for commit messages](https://numpy.org/devdocs/dev/development_workflow.html#writing-the-commit-message).
If you are writing new C code, please follow the style described in
``doc/C_STYLE_GUIDE``.
diff --git a/.lgtm.yml b/.lgtm.yml
index e64deaba7..cc16544a3 100644
--- a/.lgtm.yml
+++ b/.lgtm.yml
@@ -16,6 +16,9 @@ extraction:
index:
build_command:
- python3 setup.py build
+ after_prepare:
+ - pip3 install --upgrade --user cython
+ - export PATH="$HOME/.local/bin:$PATH"
queries:
- include: py/file-not-closed
diff --git a/.mailmap b/.mailmap
index c30a2fc0a..9d7aaa3c4 100644
--- a/.mailmap
+++ b/.mailmap
@@ -22,6 +22,7 @@ Alex Thomas <alexthomas93@users.noreply.github.com> alexthomas93 <alexthomas93@u
Alexander Belopolsky <abalkin@enlnt.com> Alexander Belopolsky <a@enlnt.com>
Alexander Belopolsky <abalkin@enlnt.com> Alexander Belopolsky <a@enlnt.com>
Alexander Belopolsky <abalkin@enlnt.com> sasha <sasha@localhost>
+Alexander Jung <kontakt@ajung.name> aleju <kontakt@ajung.name>
Alexander Shadchin <alexandr.shadchin@gmail.com> Alexandr Shadchin <alexandr.shadchin@gmail.com>
Alexander Shadchin <alexandr.shadchin@gmail.com> shadchin <alexandr.shadchin@gmail.com>
Allan Haldane <allan.haldane@gmail.com> ahaldane <ealloc@gmail.com>
@@ -30,6 +31,7 @@ Alyssa Quek <alyssaquek@gmail.com> alyssaq <alyssaquek@gmail.com>
Amir Sarabadani <ladsgroup@gmail.com> amir <ladsgroup@gmail.com>
Anatoly Techtonik <techtonik@gmail.com> anatoly techtonik <techtonik@gmail.com>
Andras Deak <deak.andris@gmail.com> adeak <adeak@users.noreply.github.com>
+Andrea Pattori <andrea.pattori@gmail.com> patto90 <andrea.pattori@gmail.com>
Andrei Kucharavy <ank@andreikucharavy.com> chiffa <ank@andreikucharavy.com>
Anne Archibald <peridot.faceted@gmail.com> aarchiba <peridot.faceted@gmail.com>
Anne Archibald <peridot.faceted@gmail.com> Anne Archibald <archibald@astron.nl>
@@ -122,6 +124,7 @@ Jeffrey Yancey <jeffrey@octane5.com> Jeff <3820914+jeffyancey@users.noreply.gith
Jeremy Lay <jlay80@gmail.com> jeremycl01 <jlay80@gmail.com>
Jérémie du Boisberranger <jeremie.du-boisberranger@inria.fr> jeremiedbb <34657725+jeremiedbb@users.noreply.github.com>
Jerome Kelleher <jerome.kelleher@ed.ac.uk> jeromekelleher <jerome.kelleher@ed.ac.uk>
+Johannes Hampp <johannes.hampp@zeu.uni-giessen.de> euronion <42553970+euronion@users.noreply.github.com>
Johannes Schönberger <hannesschoenberger@gmail.com> Johannes Schönberger <jschoenberger@demuc.de>
John Darbyshire <24256554+attack68@users.noreply.github.com> attack68 <24256554+attack68@users.noreply.github.com>
Joseph Fox-Rabinovitz <jfoxrabinovitz@gmail.com> Joseph Fox-Rabinovitz <joseph.r.fox-rabinovitz@nasa.gov>
@@ -133,12 +136,14 @@ Julian Taylor <juliantaylor108@gmail.com> Julian Taylor <juliantaylor108@googlem
Julien Lhermitte <jrmlhermitte@gmail.com> Julien Lhermitte <lhermitte@bnl.gov>
Julien Schueller <julien.schueller@gmail.com> jschueller <julien.schueller@gmail.com>
Kai Striega <kaistriega@gmail.com> kai <kaistriega@gmail.com>
+Kai Striega <kaistriega@gmail.com> kai-striega <kaistriega@gmail.com>
Kai Striega <kaistriega@gmail.com> kai-striega <kaistriega+github@gmail.com>
Khaled Ben Abdallah Okuda <khaled.ben.okuda@gmail.com> KhaledTo <khaled.ben.okuda@gmail.com>
-Kiko Correoso <kikocorreoso@gmail.com> kikocorreoso <kikocorreoso@gmail.com>
-Kiko Correoso <kikocorreoso@gmail.com> <kikocorreoso@users.noreply.github.com>
+Kiko Correoso <kachine@protonmail.com> kikocorreoso <kikocorreoso@gmail.com>
+Kiko Correoso <kachine@protonmail.com> kikocorreoso <kikocorreoso@users.noreply.github.com>
Konrad Kapp <k_kapp@yahoo.com> k_kapp@yahoo.com <k_kapp@yahoo.com>
Kriti Singh <kritisingh1.ks@gmail.com> kritisingh1 <kritisingh1.ks@gmail.com>
+Kmol Yuan <pyslvs@gmail.com> Yuan <pyslvs@gmail.com>
Lars Buitinck <larsmans@gmail.com> Lars Buitinck <l.buitinck@esciencecenter.nl>
Lars Buitinck <larsmans@gmail.com> Lars Buitinck <L.J.Buitinck@uva.nl>
Lars Grüter <lagru@mailbox.org> Lars G <lagru@mailbox.org>
diff --git a/.travis.yml b/.travis.yml
index cb98356c4..714122957 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -10,7 +10,6 @@ addons:
apt:
packages: &common_packages
- gfortran
- - libatlas-dev
- libatlas-base-dev
# Speedup builds, particularly when USE_CHROOT=1
- eatmydata
@@ -21,7 +20,7 @@ cache:
env:
global:
- - OpenBLAS_version=0.3.7.dev
+ - OpenBLAS_version=0.3.7
- WHEELHOUSE_UPLOADER_USERNAME=travis.numpy
# The following is generated with the command:
# travis encrypt -r numpy/numpy WHEELHOUSE_UPLOADER_SECRET=tH3AP1KeY
@@ -33,11 +32,14 @@ env:
python:
- 3.5
- 3.6
+ - 3.7
+ - 3.8-dev
matrix:
include:
- python: 3.7
env: INSTALL_PICKLE5=1
- - python: 3.5
+ - python: 3.6
+ dist: bionic
env: USE_DEBUG=1
addons:
apt:
diff --git a/INSTALL.rst.txt b/INSTALL.rst.txt
index 640ddafc7..bd2f4f92c 100644
--- a/INSTALL.rst.txt
+++ b/INSTALL.rst.txt
@@ -12,7 +12,7 @@ https://scipy.org/install.html.
Prerequisites
=============
-Building NumPy requires the following software installed:
+Building NumPy requires the following installed software:
1) For Python 3, Python__ 3.5.x or newer.
@@ -28,6 +28,7 @@ Building NumPy requires the following software installed:
2) Cython >= 0.29.2 (for development versions of numpy, not for released
versions)
+
3) pytest__ (optional) 1.15 or later
This is required for testing numpy, but not for using it.
@@ -91,7 +92,7 @@ installed then ``g77`` will be detected and used first. To explicitly select
Windows
-------
-On Windows, building from source can be difficult. Currently the most robust
+On Windows, building from source can be difficult. Currently, the most robust
option is to use the Intel compilers, or alternatively MSVC (the same version
as used to build Python itself) with Intel ifort. Intel itself maintains a
good `application note <https://software.intel.com/en-us/articles/numpyscipy-with-intel-mkl>`_
@@ -131,7 +132,7 @@ ATLAS) will also work.
Ubuntu/Debian
-------------
-For best performance a development package providing BLAS and CBLAS should be
+For best performance, a development package providing BLAS and CBLAS should be
installed. Some of the options available are:
- ``libblas-dev``: reference BLAS (not very optimized)
diff --git a/MANIFEST.in b/MANIFEST.in
index 7c8f3b6ef..7ab57eb8c 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -5,19 +5,23 @@
# Avoid using MANIFEST.in for that.
#
include MANIFEST.in
+include pyproject.toml
include pytest.ini
include *.txt
include README.md
include site.cfg.example
+include runtests.py
+include tox.ini
+include .coveragerc
+include test_requirements.txt
recursive-include numpy/random *.pyx *.pxd *.pyx.in *.pxd.in
+include numpy/__init__.pxd
# Add build support that should go in sdist, but not go in bdist/be installed
# Note that sub-directories that don't have __init__ are apparently not
# included by 'recursive-include', so list those separately
recursive-include numpy *
recursive-include numpy/_build_utils *
recursive-include numpy/linalg/lapack_lite *
-include runtests.py
-include tox.ini pytest.ini .coveragerc
recursive-include tools *
# Add sdist files whose use depends on local configuration.
include numpy/core/src/common/cblasfuncs.c
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index edb577cdb..0e97d42d6 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -8,7 +8,7 @@ trigger:
variables:
# OpenBLAS_version should be updated
# to match numpy-wheels repo
- OpenBLAS_version: 0.3.7.dev
+ OpenBLAS_version: 0.3.7
jobs:
- job: Linux_Python_36_32bit_full_with_asserts
@@ -21,14 +21,15 @@ jobs:
apt-get -y update && \
apt-get -y install python3.6-dev python3-pip locales python3-certifi && \
locale-gen fr_FR && update-locale && \
- pip3 install setuptools nose cython==0.29.0 pytest pytz pickle5 && \
apt-get -y install gfortran-5 wget && \
target=\$(python3 tools/openblas_support.py) && \
cp -r \$target/usr/local/lib/* /usr/lib && \
cp \$target/usr/local/include/* /usr/include && \
+ python3 -m pip install --user --upgrade pip setuptools && \
+ python3 -m pip install --user -r test_requirements.txt && \
python3 -m pip install . && \
F77=gfortran-5 F90=gfortran-5 \
- CFLAGS='-UNDEBUG -std=c99' python3 runtests.py -n --mode=full -- -rsx --junitxml=junit/test-results.xml && \
+ CFLAGS='-UNDEBUG -std=c99' python3 runtests.py -n --debug-configure --mode=full -- -rsx --junitxml=junit/test-results.xml && \
python3 tools/openblas_support.py --check_version $(OpenBLAS_version)"
displayName: 'Run 32-bit Ubuntu Docker Build / Tests'
- task: PublishTestResults@2
@@ -85,13 +86,15 @@ jobs:
displayName: 'install pre-built openblas'
- script: python -m pip install --upgrade pip setuptools wheel
displayName: 'Install tools'
- - script: python -m pip install cython nose pytz pytest pickle5 vulture docutils sphinx==1.8.5 numpydoc
+ - script: |
+ python -m pip install -r test_requirements.txt
+ python -m pip install vulture docutils sphinx==2.2.0 numpydoc
displayName: 'Install dependencies; some are optional to avoid test skips'
- script: /bin/bash -c "! vulture . --min-confidence 100 --exclude doc/,numpy/distutils/ | grep 'unreachable'"
displayName: 'Check for unreachable code paths in Python modules'
# prefer usage of clang over gcc proper
# to match likely scenario on many user mac machines
- - script: python setup.py build -j 4 install
+ - script: python setup.py build -j 4 build_src -v install
displayName: 'Build NumPy'
env:
BLAS: None
@@ -140,13 +143,11 @@ jobs:
PYTHON_VERSION: '3.6'
PYTHON_ARCH: 'x64'
TEST_MODE: full
- INSTALL_PICKLE5: 1
BITS: 64
Python37-64bit-full:
PYTHON_VERSION: '3.7'
PYTHON_ARCH: 'x64'
TEST_MODE: full
- INSTALL_PICKLE5: 1
BITS: 64
steps:
- task: UsePythonVersion@0
@@ -156,11 +157,8 @@ jobs:
architecture: $(PYTHON_ARCH)
- script: python -m pip install --upgrade pip setuptools wheel
displayName: 'Install tools'
- - script: python -m pip install cython nose pytz pytest
+ - script: python -m pip install -r test_requirements.txt
displayName: 'Install dependencies; some are optional to avoid test skips'
- - script: if [%INSTALL_PICKLE5%]==[1] python -m pip install pickle5
- displayName: 'Install optional pickle5 backport (only for python3.6 and 3.7)'
-
- powershell: |
$pyversion = python -c "from __future__ import print_function; import sys; print(sys.version.split()[0])"
Write-Host "Python Version: $pyversion"
@@ -178,7 +176,6 @@ jobs:
# vs. manual setup.py and then runtests.py for testing only
- powershell: |
If ($(BITS) -eq 32) {
- $env:NPY_DISTUTILS_APPEND_FLAGS = 1
$env:CFLAGS = "-m32"
$env:LDFLAGS = "-m32"
$env:PATH = "C:\\tools\\mingw32\\bin;" + $env:PATH
@@ -221,3 +218,4 @@ jobs:
testResultsFiles: '**/test-*.xml'
testRunTitle: 'Publish test results for PyPy3'
failTaskOnFailedTests: true
+
diff --git a/changelog/13829.enhancement.rst b/changelog/13829.enhancement.rst
new file mode 100644
index 000000000..ede1b2a53
--- /dev/null
+++ b/changelog/13829.enhancement.rst
@@ -0,0 +1,6 @@
+Add ``axis`` argument for ``random.permutation`` and ``random.shuffle``
+-----------------------------------------------------------------------
+
+Previously the ``random.permutation`` and ``random.shuffle`` functions
+can only shuffle an array along the first axis; they now have a
+new argument ``axis`` which allows shuffle along a specified axis.
diff --git a/doc/DISTUTILS.rst.txt b/doc/DISTUTILS.rst.txt
index eadde63f8..bcef82500 100644
--- a/doc/DISTUTILS.rst.txt
+++ b/doc/DISTUTILS.rst.txt
@@ -243,7 +243,7 @@ in writing setup scripts:
after processing all source generators, no extension module will
be built. This is the recommended way to conditionally define
extension modules. Source generator functions are called by the
- ``build_src`` command of ``numpy.distutils``.
+ ``build_src`` sub-command of ``numpy.distutils``.
For example, here is a typical source generator function::
diff --git a/doc/HOWTO_RELEASE.rst.txt b/doc/HOWTO_RELEASE.rst.txt
index e2aea12b7..4b485c8b9 100644
--- a/doc/HOWTO_RELEASE.rst.txt
+++ b/doc/HOWTO_RELEASE.rst.txt
@@ -11,7 +11,7 @@ useful info can be found.
Source tree
-----------
-* INSTALL.txt
+* INSTALL.rst.txt
* release.sh
* pavement.py
@@ -197,17 +197,6 @@ best to read the pavement.py script.
.. note:: The following steps are repeated for the beta(s), release
candidates(s) and the final release.
-Check that docs can be built
-----------------------------
-Do::
-
- cd doc/
- make dist
-
-to check that the documentation is in a buildable state. See
-doc/HOWTO_BUILD_DOCS.rst.txt for more details and for how to update
-https://docs.scipy.org.
-
Check deprecations
------------------
Before the release branch is made, it should be checked that all deprecated
@@ -257,12 +246,19 @@ updated for a major release.
Check the release notes
-----------------------
-Check that the release notes are up-to-date.
+Use `towncrier`_ to build the release note, copy it to the proper name, and
+commit the changes. This will remove all the fragments from ``changelog/*.rst``
+and add ``doc/release/latest-note.rst`` which must be renamed with the proper
+version number::
+
+ python -mtowncrier --version "Numpy 1.11.0"
+ git mv doc/release/latest-note.rst doc/release/1.11.0-notes.rst
+ git commit -m"Create release note"
-Write or update the release notes in a file named for the release, such as
-``doc/release/1.11.0-notes.rst``.
+Check that the release notes are up-to-date.
-Mention at least the following:
+Update the release notes with a Highlights section. Mention some of the
+following:
- major new features
- deprecated and removed features
@@ -270,8 +266,7 @@ Mention at least the following:
- for SciPy, supported NumPy version(s)
- outlook for the near future
-Also make sure that as soon as the branch is made, there is a new release
-notes file in the master branch for the next release.
+.. _towncrier: https://github.com/hawkowl/towncrier
Update the release status and create a release "tag"
----------------------------------------------------
@@ -383,14 +378,24 @@ Build the changelog and notes for upload with::
paver write_release_and_log
-The tar-files and binary releases for distribution should be uploaded to SourceForge,
-together with the Release Notes and the Changelog. Uploading can be done
-through a web interface or, more efficiently, through scp/sftp/rsync as
-described in the SourceForge
-`upload guide <https://sourceforge.net/apps/trac/sourceforge/wiki/Release%20files%20for%20download>`_ (dead link).
-For example::
+Build and archive documentation
+-------------------------------
+Do::
- scp <filename> <username>,numpy@frs.sourceforge.net:/home/frs/project/n/nu/numpy/NumPy/<releasedir>/
+ cd doc/
+ make dist
+
+to check that the documentation is in a buildable state. Then, after tagging,
+create an archive of the documentation in the numpy/doc repo::
+
+ # This checks out github.com/numpy/doc and adds (``git add``) the
+ # documentation to the checked out repo.
+ make merge-doc
+ # Now edit the ``index.html`` file in the repo to reflect the new content,
+ # and commit the changes
+ git -C dist/merge commit -am "Add documentation for <version>"
+ # Push to numpy/doc repo
+ git -C dist/merge push
Update PyPI
-----------
@@ -443,28 +448,6 @@ you released you can push the tag and release commit up to github::
where ``upstream`` points to the main https://github.com/numpy/numpy.git
repository.
-Update docs.scipy.org
----------------------
-
-All documentation for a release can be updated on https://docs.scipy.org/ with:
-
- make dist
- make upload USERNAME=<yourname> RELEASE=1.11.0
-
-Note that ``<username>`` must have SSH credentials on the server. If you don't
-have those, ask someone who does (the list currently includes @rgommers,
-@juliantaylor and @pv).
-
-Also rebuild and upload ``docs.scipy.org`` front page, if the release
-series is a new one. The front page sources have their own repo:
-https://github.com/scipy/docs.scipy.org. Do the following:
-
-- Update ``index.rst`` for the new version.
-- ``make dist``
-- Check that the built documentation is OK.
-- ``touch output-is-fine``
-- ``make upload USERNAME=<username> RELEASE=1.x.y``
-
Update scipy.org
----------------
diff --git a/doc/Makefile b/doc/Makefile
index c7ebd515b..3c32cb811 100644
--- a/doc/Makefile
+++ b/doc/Makefile
@@ -14,6 +14,10 @@ PYTHON = python$(PYVER)
SPHINXOPTS ?=
SPHINXBUILD ?= LANG=C sphinx-build
PAPER ?=
+# For merging a documentation archive into a git checkout of numpy/doc
+# Turn a tag like v1.18.0 into 1.18
+# Use sed -n -e 's/pattern/match/p' to return a blank value if no match
+TAG ?= $(shell git describe --tag | sed -n -e's,v\([1-9]\.[0-9]*\)\.[0-9].*,\1,p')
FILES=
@@ -24,7 +28,8 @@ ALLSPHINXOPTS = -WT --keep-going -d build/doctrees $(PAPEROPT_$(PAPER)) \
$(SPHINXOPTS) source
.PHONY: help clean html web pickle htmlhelp latex changes linkcheck \
- dist dist-build gitwash-update version-check
+ dist dist-build gitwash-update version-check html-build latex-build \
+ merge-doc
#------------------------------------------------------------------------------
@@ -40,6 +45,7 @@ help:
@echo " dist PYVER=... to make a distribution-ready tree"
@echo " gitwash-update GITWASH=path/to/gitwash update gitwash developer docs"
@echo " upload USERNAME=... RELEASE=... to upload built docs to docs.scipy.org"
+ @echo " merge-doc TAG=... to clone numpy/doc and archive documentation into it"
clean:
-rm -rf build/*
@@ -72,13 +78,16 @@ UPLOAD_DIR=/srv/docs_scipy_org/doc/numpy-$(RELEASE)
DIST_VARS=SPHINXBUILD="LANG=C PYTHONPATH=$(INSTALL_PPH) python$(PYVER) `which sphinx-build`" PYTHON="PYTHONPATH=$(INSTALL_PPH) python$(PYVER)"
-NUMPYVER:=$(shell $(PYTHON) -c "import numpy; print(numpy.version.git_revision[:10])")
+NUMPYVER:=$(shell $(PYTHON) -c "import numpy; print(numpy.version.git_revision[:10])" 2>/dev/null)
GITVER ?= $(shell cd ..; $(PYTHON) -c "from setup import git_version; \
print(git_version()[:10])")
version-check:
ifeq "$(GITVER)" "Unknown"
# @echo sdist build with unlabeled sources
+else ifeq ("", "$(NUMPYVER)")
+ @echo numpy not found, cannot build documentation without successful \"import numpy\"
+ @exit 1
else ifneq ($(NUMPYVER),$(GITVER))
@echo installed numpy $(NUMPYVER) != current repo git version \'$(GITVER)\'
@echo use '"make dist"' or '"GITVER=$(NUMPYVER) make $(MAKECMDGOALS) ..."'
@@ -89,13 +98,14 @@ else
endif
-dist:
+dist: build/dist.tar.gz
+
+build/dist.tar.gz:
make $(DIST_VARS) real-dist
-real-dist: dist-build html html-scipyorg
- test -d build/latex || make latex
+real-dist: dist-build html-build html-scipyorg
+ test -d build/latex || make latex-build
make -C build/latex all-pdf
- -test -d build/htmlhelp || make htmlhelp-build
-rm -rf build/dist
cp -r build/html-scipyorg build/dist
cd build/html && zip -9r ../dist/numpy-html.zip .
@@ -111,7 +121,7 @@ dist-build:
install -d $(subst :, ,$(INSTALL_PPH))
$(PYTHON) `which easy_install` --prefix=$(INSTALL_DIR) ../dist/*.egg
-upload:
+upload: build/dist.tar.gz
# SSH must be correctly configured for this to work.
# Assumes that ``make dist`` was already run
# Example usage: ``make upload USERNAME=rgommers RELEASE=1.10.1``
@@ -128,6 +138,32 @@ upload:
ssh $(USERNAME)@docs.scipy.org rm $(UPLOAD_DIR)/dist.tar.gz
ssh $(USERNAME)@docs.scipy.org ln -snf numpy-$(RELEASE) /srv/docs_scipy_org/doc/numpy
+
+merge-doc: build/dist.tar.gz
+ifeq "$(TAG)" ""
+ echo tag "$(TAG)" not of the form 1.18;
+ exit 1;
+endif
+ @# Only clone if the directory does not exist
+ @if ! test -d build/merge; then \
+ git clone https://github.com/numpy/doc build/merge; \
+ fi;
+ @# Remove any old content and copy in the new, add it to git
+ -rm -rf build/merge/$(TAG)/*
+ -mkdir -p build/merge/$(TAG)
+ @# -C changes working directory
+ tar -C build/merge/$(TAG) -xf build/dist.tar.gz
+ git -C build/merge add $(TAG)
+ @# For now, the user must do this. If it is onerous, automate it and change
+ @# the instructions in doc/HOWTO_RELEASE.rst.txt
+ @echo " "
+ @echo New documentation archive added to ./build/merge.
+ @echo Now add/modify the appropriate section after
+ @echo " <!-- insert here -->"
+ @echo in build/merge/index.html,
+ @echo then \"git commit\", \"git push\"
+
+
#------------------------------------------------------------------------------
# Basic Sphinx generation rules for different formats
#------------------------------------------------------------------------------
@@ -137,7 +173,8 @@ build/generate-stamp: $(wildcard source/reference/*.rst)
mkdir -p build
touch build/generate-stamp
-html: generate version-check
+html: version-check html-build
+html-build: generate
mkdir -p build/html build/doctrees
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) build/html $(FILES)
$(PYTHON) postprocess.py html build/html/*.html
@@ -175,7 +212,8 @@ qthelp: generate version-check
mkdir -p build/qthelp build/doctrees
$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) build/qthelp $(FILES)
-latex: generate version-check
+latex: version-check latex-build
+latex-build: generate
mkdir -p build/latex build/doctrees
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) build/latex $(FILES)
$(PYTHON) postprocess.py tex build/latex/*.tex
diff --git a/doc/Py3K.rst.txt b/doc/Py3K.rst.txt
index f78b9e5db..b23536ca5 100644
--- a/doc/Py3K.rst.txt
+++ b/doc/Py3K.rst.txt
@@ -812,20 +812,20 @@ Types with tp_as_sequence defined
PySequenceMethods in py3k are binary compatible with py2k, but some of the
slots have gone away. I suspect this means some functions need redefining so
-the semantics of the slots needs to be checked.
-
-PySequenceMethods foo_sequence_methods = {
- (lenfunc)0, /* sq_length */
- (binaryfunc)0, /* sq_concat */
- (ssizeargfunc)0, /* sq_repeat */
- (ssizeargfunc)0, /* sq_item */
- (void *)0, /* nee sq_slice */
- (ssizeobjargproc)0, /* sq_ass_item */
- (void *)0, /* nee sq_ass_slice */
- (objobjproc)0, /* sq_contains */
- (binaryfunc)0, /* sq_inplace_concat */
- (ssizeargfunc)0 /* sq_inplace_repeat */
-};
+the semantics of the slots needs to be checked::
+
+ PySequenceMethods foo_sequence_methods = {
+ (lenfunc)0, /* sq_length */
+ (binaryfunc)0, /* sq_concat */
+ (ssizeargfunc)0, /* sq_repeat */
+ (ssizeargfunc)0, /* sq_item */
+ (void *)0, /* nee sq_slice */
+ (ssizeobjargproc)0, /* sq_ass_item */
+ (void *)0, /* nee sq_ass_slice */
+ (objobjproc)0, /* sq_contains */
+ (binaryfunc)0, /* sq_inplace_concat */
+ (ssizeargfunc)0 /* sq_inplace_repeat */
+ };
PyMappingMethods
@@ -840,13 +840,13 @@ Types with tp_as_mapping defined
* multiarray/arrayobject.c
PyMappingMethods in py3k look to be the same as in py2k. The semantics
-of the slots needs to be checked.
+of the slots needs to be checked::
-PyMappingMethods foo_mapping_methods = {
- (lenfunc)0, /* mp_length */
- (binaryfunc)0, /* mp_subscript */
- (objobjargproc)0 /* mp_ass_subscript */
-};
+ PyMappingMethods foo_mapping_methods = {
+ (lenfunc)0, /* mp_length */
+ (binaryfunc)0, /* mp_subscript */
+ (objobjargproc)0 /* mp_ass_subscript */
+ };
PyFile
diff --git a/doc/RELEASE_WALKTHROUGH.rst.txt b/doc/RELEASE_WALKTHROUGH.rst.txt
index 6987dd6c1..0a761e350 100644
--- a/doc/RELEASE_WALKTHROUGH.rst.txt
+++ b/doc/RELEASE_WALKTHROUGH.rst.txt
@@ -38,6 +38,11 @@ to the maintenance branch, and later will be forward ported to master.
Finish the Release Note
-----------------------
+.. note::
+
+ This has changed now that we use ``towncrier``. See the instructions for
+ creating the release note in ``doc/release/upcoming_changes/README.rst``.
+
Fill out the release note ``doc/release/1.14.5-notes.rst`` calling out
significant changes.
@@ -51,7 +56,7 @@ repository::
$ git checkout maintenance/1.14.x
$ git pull upstream maintenance/1.14.x
$ git submodule update
- $ git clean -xdf > /dev/null
+ $ git clean -xdfq
Edit pavement.py and setup.py as detailed in HOWTO_RELEASE::
@@ -78,7 +83,7 @@ Paver is used to build the source releases. It will create the ``release`` and
``release/installers`` directories and put the ``*.zip`` and ``*.tar.gz``
source releases in the latter. ::
- $ cython --version # check that you have the correct cython version
+ $ python3 -m cython --version # check for correct cython version
$ paver sdist # sdist will do a git clean -xdf, so we omit that
@@ -227,28 +232,39 @@ add files, using an editable text window and as binary uploads.
- Hit the ``{Publish,Update} release`` button at the bottom.
-Upload documents to docs.scipy.org
-----------------------------------
+Upload documents to numpy.org
+-----------------------------
This step is only needed for final releases and can be skipped for
-pre-releases. You will also need upload permission for the document server, if
-you do not have permission ping Pauli Virtanen or Ralf Gommers to generate and
-upload the documentation. Otherwise::
+pre-releases. ``make merge-doc`` clones the ``numpy/doc`` repo into
+``doc/build/merge`` and updates it with the new documentation::
$ pushd doc
$ make dist
- $ make upload USERNAME=<yourname> RELEASE=v1.14.5
+ $ make merge-doc
$ popd
-If the release series is a new one, you will need to rebuild and upload the
-``docs.scipy.org`` front page::
+If the release series is a new one, you will need to add a new section to the
+``doc/build/merge/index.html`` front page just after the "insert here" comment::
+
+ $ gvim doc/build/merge/index.html +/'insert here'
+
+Otherwise, only the ``zip`` and ``pdf`` links should be updated with the
+new tag name::
- $ cd ../docs.scipy.org
- $ gvim index.rst
+ $ gvim doc/build/merge/index.html +/'tag v1.14'
-Note: there is discussion about moving the docs to github. This section will be
-updated when/if that happens.
+You can "test run" the new documentation in a browser to make sure the links
+work::
+ $ firefox doc/build/merge/index.html
+
+Once everything seems satisfactory, commit and upload the changes::
+
+ $ pushd doc/build/merge
+ $ git commit -am"Add documentation for v1.14.5"
+ $ git push
+ $ popd
Announce the release on scipy.org
---------------------------------
diff --git a/doc/changelog/1.16.5-changelog.rst b/doc/changelog/1.16.5-changelog.rst
new file mode 100644
index 000000000..19374058d
--- /dev/null
+++ b/doc/changelog/1.16.5-changelog.rst
@@ -0,0 +1,54 @@
+
+Contributors
+============
+
+A total of 18 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Alexander Shadchin
+* Allan Haldane
+* Bruce Merry +
+* Charles Harris
+* Colin Snyder +
+* Dan Allan +
+* Emile +
+* Eric Wieser
+* Grey Baker +
+* Maksim Shabunin +
+* Marten van Kerkwijk
+* Matti Picus
+* Peter Andreas Entschev +
+* Ralf Gommers
+* Richard Harris +
+* Sebastian Berg
+* Sergei Lebedev +
+* Stephan Hoyer
+
+Pull requests merged
+====================
+
+A total of 23 pull requests were merged for this release.
+
+* `#13742 <https://github.com/numpy/numpy/pull/13742>`__: ENH: Add project URLs to setup.py
+* `#13823 <https://github.com/numpy/numpy/pull/13823>`__: TEST, ENH: fix tests and ctypes code for PyPy
+* `#13845 <https://github.com/numpy/numpy/pull/13845>`__: BUG: use npy_intp instead of int for indexing array
+* `#13867 <https://github.com/numpy/numpy/pull/13867>`__: TST: Ignore DeprecationWarning during nose imports
+* `#13905 <https://github.com/numpy/numpy/pull/13905>`__: BUG: Fix use-after-free in boolean indexing
+* `#13933 <https://github.com/numpy/numpy/pull/13933>`__: MAINT/BUG/DOC: Fix errors in _add_newdocs
+* `#13984 <https://github.com/numpy/numpy/pull/13984>`__: BUG: fix byte order reversal for datetime64[ns]
+* `#13994 <https://github.com/numpy/numpy/pull/13994>`__: MAINT,BUG: Use nbytes to also catch empty descr during allocation
+* `#14042 <https://github.com/numpy/numpy/pull/14042>`__: BUG: np.array cleared errors occured in PyMemoryView_FromObject
+* `#14043 <https://github.com/numpy/numpy/pull/14043>`__: BUG: Fixes for Undefined Behavior Sanitizer (UBSan) errors.
+* `#14044 <https://github.com/numpy/numpy/pull/14044>`__: BUG: ensure that casting to/from structured is properly checked.
+* `#14045 <https://github.com/numpy/numpy/pull/14045>`__: MAINT: fix histogram*d dispatchers
+* `#14046 <https://github.com/numpy/numpy/pull/14046>`__: BUG: further fixup to histogram2d dispatcher.
+* `#14052 <https://github.com/numpy/numpy/pull/14052>`__: BUG: Replace contextlib.suppress for Python 2.7
+* `#14056 <https://github.com/numpy/numpy/pull/14056>`__: BUG: fix compilation of 3rd party modules with Py_LIMITED_API...
+* `#14057 <https://github.com/numpy/numpy/pull/14057>`__: BUG: Fix memory leak in dtype from dict contructor
+* `#14058 <https://github.com/numpy/numpy/pull/14058>`__: DOC: Document array_function at a higher level.
+* `#14084 <https://github.com/numpy/numpy/pull/14084>`__: BUG, DOC: add new recfunctions to `__all__`
+* `#14162 <https://github.com/numpy/numpy/pull/14162>`__: BUG: Remove stray print that causes a SystemError on python 3.7
+* `#14297 <https://github.com/numpy/numpy/pull/14297>`__: TST: Pin pytest version to 5.0.1.
+* `#14322 <https://github.com/numpy/numpy/pull/14322>`__: ENH: Enable huge pages in all Linux builds
+* `#14346 <https://github.com/numpy/numpy/pull/14346>`__: BUG: fix behavior of structured_to_unstructured on non-trivial...
+* `#14382 <https://github.com/numpy/numpy/pull/14382>`__: REL: Prepare for the NumPy 1.16.5 release.
diff --git a/doc/changelog/1.17.0-changelog.rst b/doc/changelog/1.17.0-changelog.rst
index 7b55f395a..debfb6f5b 100644
--- a/doc/changelog/1.17.0-changelog.rst
+++ b/doc/changelog/1.17.0-changelog.rst
@@ -2,7 +2,7 @@
Contributors
============
-A total of 142 people contributed to this release. People with a "+" by their
+A total of 150 people contributed to this release. People with a "+" by their
names contributed a patch for the first time.
* Aaron Voelker +
@@ -18,6 +18,7 @@ names contributed a patch for the first time.
* Allan Haldane
* Ander Ustarroz +
* Andras Deak
+* Andrea Pattori +
* Andreas Schwab
* Andrew Naguib +
* Andy Scholand +
@@ -37,6 +38,8 @@ names contributed a patch for the first time.
* Christoph Gohlke
* Christopher Whelan +
* Chuanzhu Xu +
+* Colin Snyder +
+* Dan Allan +
* Daniel Hrisca
* Daniel Lawrence +
* Debsankha Manik +
@@ -50,14 +53,17 @@ names contributed a patch for the first time.
* Gary Gurlaskie +
* Gregory Lee +
* Gregory R. Lee
+* Guillaume Horel +
* Hameer Abbasi
* Haoyu Sun +
+* Harmon +
* He Jia +
* Hunter Damron +
* Ian Sanders +
* Ilja +
* Isaac Virshup +
* Isaiah Norton +
+* Jackie Leng +
* Jaime Fernandez
* Jakub Wilk
* Jan S. (Milania1) +
@@ -67,6 +73,7 @@ names contributed a patch for the first time.
* Jim Turner +
* Jingbei Li +
* Joachim Hereth +
+* Johannes Hampp +
* John Belmonte +
* John Kirkham
* John Law +
@@ -84,6 +91,7 @@ names contributed a patch for the first time.
* Kiko Correoso +
* Kriti Singh +
* Lars Grueter +
+* Luis Pedro Coelho
* Maksim Shabunin +
* Manvi07 +
* Mark Harfouche
@@ -125,6 +133,7 @@ names contributed a patch for the first time.
* Shekhar Prasad Rajak +
* Stefan van der Walt
* Stephan Hoyer
+* Steve Stagg +
* SuryaChand P +
* Søren Rasmussen +
* Thibault Hallouin +
@@ -143,7 +152,6 @@ names contributed a patch for the first time.
* Yu Kobayashi +
* Yury Kirienko +
* aashuli +
-* euronion +
* luzpaz
* parul +
* spacescientist +
@@ -151,7 +159,7 @@ names contributed a patch for the first time.
Pull requests merged
====================
-A total of 505 pull requests were merged for this release.
+A total of 531 pull requests were merged for this release.
* `#4808 <https://github.com/numpy/numpy/pull/4808>`__: ENH: Make the `mode` parameter of np.pad default to 'constant'
* `#8131 <https://github.com/numpy/numpy/pull/8131>`__: BUG: Fix help() formatting for deprecated functions.
@@ -598,7 +606,7 @@ A total of 505 pull requests were merged for this release.
* `#13720 <https://github.com/numpy/numpy/pull/13720>`__: MAINT/BUG: Manage more files with with statements
* `#13721 <https://github.com/numpy/numpy/pull/13721>`__: MAINT,BUG: More ufunc exception cleanup
* `#13724 <https://github.com/numpy/numpy/pull/13724>`__: MAINT: fix use of cache_dim
-* `#13725 <https://github.com/numpy/numpy/pull/13725>`__: BUG: fix compilation of 3rdparty modules with Py_LIMITED_API...
+* `#13725 <https://github.com/numpy/numpy/pull/13725>`__: BUG: fix compilation of 3rd party modules with Py_LIMITED_API...
* `#13726 <https://github.com/numpy/numpy/pull/13726>`__: MAINT: Update PCG jump sizes
* `#13729 <https://github.com/numpy/numpy/pull/13729>`__: DOC: Merge together DISTUTILS.rst.txt#template-files" and distutils.r…
* `#13730 <https://github.com/numpy/numpy/pull/13730>`__: MAINT: Change keyword from reserved word
@@ -658,3 +666,29 @@ A total of 505 pull requests were merged for this release.
* `#13869 <https://github.com/numpy/numpy/pull/13869>`__: DOC: Prepare for 1.17.0rc1 release
* `#13870 <https://github.com/numpy/numpy/pull/13870>`__: MAINT,BUG: Use nbytes to also catch empty descr during allocation
* `#13873 <https://github.com/numpy/numpy/pull/13873>`__: ENH: Rename default_gen -> default_rng
+* `#13893 <https://github.com/numpy/numpy/pull/13893>`__: DOC: fix links in 1.17 release note
+* `#13897 <https://github.com/numpy/numpy/pull/13897>`__: DOC: Use Cython >= 0.29.11 for Python 3.8 support.
+* `#13932 <https://github.com/numpy/numpy/pull/13932>`__: MAINT,BUG,DOC: Fix errors in _add_newdocs
+* `#13963 <https://github.com/numpy/numpy/pull/13963>`__: ENH, BUILD: refactor all OpenBLAS downloads into a single, testable...
+* `#13971 <https://github.com/numpy/numpy/pull/13971>`__: DOC: emphasize random API changes
+* `#13972 <https://github.com/numpy/numpy/pull/13972>`__: MAINT: Rewrite Floyd algorithm
+* `#13992 <https://github.com/numpy/numpy/pull/13992>`__: BUG: Do not crash on recursive `.dtype` attribute lookup.
+* `#13993 <https://github.com/numpy/numpy/pull/13993>`__: DEP: Speed up WarnOnWrite deprecation in buffer interface
+* `#13995 <https://github.com/numpy/numpy/pull/13995>`__: BLD: Remove Trusty dist in Travis CI build
+* `#13996 <https://github.com/numpy/numpy/pull/13996>`__: BUG: Handle weird bytestrings in dtype()
+* `#13997 <https://github.com/numpy/numpy/pull/13997>`__: BUG: i0 Bessel function regression on array-likes supporting...
+* `#13998 <https://github.com/numpy/numpy/pull/13998>`__: BUG: Missing warnings import in polyutils.
+* `#13999 <https://github.com/numpy/numpy/pull/13999>`__: DOC: Document array_function at a higher level.
+* `#14001 <https://github.com/numpy/numpy/pull/14001>`__: DOC: Show workaround for Generator.integers backward compatibility
+* `#14021 <https://github.com/numpy/numpy/pull/14021>`__: DOC: Prepare 1.17.0rc2 release.
+* `#14040 <https://github.com/numpy/numpy/pull/14040>`__: DOC: Improve quickstart documentation of new random Generator.
+* `#14041 <https://github.com/numpy/numpy/pull/14041>`__: TST, MAINT: expand OpenBLAS version checking
+* `#14080 <https://github.com/numpy/numpy/pull/14080>`__: BUG, DOC: add new recfunctions to `__all__`
+* `#14081 <https://github.com/numpy/numpy/pull/14081>`__: BUG: fix build issue on icc 2016
+* `#14082 <https://github.com/numpy/numpy/pull/14082>`__: BUG: Fix file-like object check when saving arrays
+* `#14109 <https://github.com/numpy/numpy/pull/14109>`__: REV: "ENH: Improved performance of PyArray_FromAny for sequences...
+* `#14126 <https://github.com/numpy/numpy/pull/14126>`__: BUG, TEST: Adding validation test suite to validate float32 exp
+* `#14127 <https://github.com/numpy/numpy/pull/14127>`__: DOC: Add blank line above doctest for intersect1d
+* `#14128 <https://github.com/numpy/numpy/pull/14128>`__: MAINT: adjustments to test_ufunc_noncontigous
+* `#14129 <https://github.com/numpy/numpy/pull/14129>`__: MAINT: Use equality instead of identity check with literal
+* `#14133 <https://github.com/numpy/numpy/pull/14133>`__: MAINT: Update mailmap and changelog for 1.17.0
diff --git a/doc/changelog/1.17.1-changelog.rst b/doc/changelog/1.17.1-changelog.rst
new file mode 100644
index 000000000..c7c8b6c8e
--- /dev/null
+++ b/doc/changelog/1.17.1-changelog.rst
@@ -0,0 +1,55 @@
+
+Contributors
+============
+
+A total of 17 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Alexander Jung +
+* Allan Haldane
+* Charles Harris
+* Eric Wieser
+* Giuseppe Cuccu +
+* Hiroyuki V. Yamazaki
+* Jérémie du Boisberranger
+* Kmol Yuan +
+* Matti Picus
+* Max Bolingbroke +
+* Maxwell Aladago +
+* Oleksandr Pavlyk
+* Peter Andreas Entschev
+* Sergei Lebedev
+* Seth Troisi +
+* Vladimir Pershin +
+* Warren Weckesser
+
+Pull requests merged
+====================
+
+A total of 24 pull requests were merged for this release.
+
+* `#14156 <https://github.com/numpy/numpy/pull/14156>`__: TST: Allow fuss in testing strided/non-strided exp/log loops
+* `#14157 <https://github.com/numpy/numpy/pull/14157>`__: BUG: avx2_scalef_ps must be static
+* `#14158 <https://github.com/numpy/numpy/pull/14158>`__: BUG: Remove stray print that causes a SystemError on python 3.7.
+* `#14159 <https://github.com/numpy/numpy/pull/14159>`__: BUG: Fix DeprecationWarning in python 3.8.
+* `#14160 <https://github.com/numpy/numpy/pull/14160>`__: BLD: Add missing gcd/lcm definitions to npy_math.h
+* `#14161 <https://github.com/numpy/numpy/pull/14161>`__: DOC, BUILD: cleanups and fix (again) 'build dist'
+* `#14166 <https://github.com/numpy/numpy/pull/14166>`__: TST: Add 3.8-dev to travisCI testing.
+* `#14194 <https://github.com/numpy/numpy/pull/14194>`__: BUG: Remove the broken clip wrapper (Backport)
+* `#14198 <https://github.com/numpy/numpy/pull/14198>`__: DOC: Fix hermitian argument docs in svd.
+* `#14199 <https://github.com/numpy/numpy/pull/14199>`__: MAINT: Workaround for Intel compiler bug leading to failing test
+* `#14200 <https://github.com/numpy/numpy/pull/14200>`__: TST: Clean up of test_pocketfft.py
+* `#14201 <https://github.com/numpy/numpy/pull/14201>`__: BUG: Make advanced indexing result on read-only subclass writeable...
+* `#14236 <https://github.com/numpy/numpy/pull/14236>`__: BUG: Fixed default BitGenerator name
+* `#14237 <https://github.com/numpy/numpy/pull/14237>`__: ENH: add c-imported modules for freeze analysis in np.random
+* `#14296 <https://github.com/numpy/numpy/pull/14296>`__: TST: Pin pytest version to 5.0.1
+* `#14301 <https://github.com/numpy/numpy/pull/14301>`__: BUG: Fix leak in the f2py-generated module init and `PyMem_Del`...
+* `#14302 <https://github.com/numpy/numpy/pull/14302>`__: BUG: Fix formatting error in exception message
+* `#14307 <https://github.com/numpy/numpy/pull/14307>`__: MAINT: random: Match type of SeedSequence.pool_size to DEFAULT_POOL_SIZE.
+* `#14308 <https://github.com/numpy/numpy/pull/14308>`__: BUG: Fix numpy.random bug in platform detection
+* `#14309 <https://github.com/numpy/numpy/pull/14309>`__: ENH: Enable huge pages in all Linux builds
+* `#14330 <https://github.com/numpy/numpy/pull/14330>`__: BUG: Fix segfault in `random.permutation(x)` when x is a string.
+* `#14338 <https://github.com/numpy/numpy/pull/14338>`__: BUG: don't fail when lexsorting some empty arrays (#14228)
+* `#14339 <https://github.com/numpy/numpy/pull/14339>`__: BUG: Fix misuse of .names and .fields in various places (backport...
+* `#14345 <https://github.com/numpy/numpy/pull/14345>`__: BUG: fix behavior of structured_to_unstructured on non-trivial...
+* `#14350 <https://github.com/numpy/numpy/pull/14350>`__: REL: Prepare 1.17.1 release
diff --git a/doc/changelog/1.17.2-changelog.rst b/doc/changelog/1.17.2-changelog.rst
new file mode 100644
index 000000000..144f40038
--- /dev/null
+++ b/doc/changelog/1.17.2-changelog.rst
@@ -0,0 +1,28 @@
+
+Contributors
+============
+
+A total of 7 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* CakeWithSteak +
+* Charles Harris
+* Dan Allan
+* Hameer Abbasi
+* Lars Grueter
+* Matti Picus
+* Sebastian Berg
+
+Pull requests merged
+====================
+
+A total of 8 pull requests were merged for this release.
+
+* `#14418 <https://github.com/numpy/numpy/pull/14418>`__: BUG: Fix aradixsort indirect indexing.
+* `#14420 <https://github.com/numpy/numpy/pull/14420>`__: DOC: Fix a minor typo in dispatch documentation.
+* `#14421 <https://github.com/numpy/numpy/pull/14421>`__: BUG: test, fix regression in converting to ctypes
+* `#14430 <https://github.com/numpy/numpy/pull/14430>`__: BUG: Do not show Override module in private error classes.
+* `#14432 <https://github.com/numpy/numpy/pull/14432>`__: BUG: Fixed maximum relative error reporting in assert_allclose.
+* `#14433 <https://github.com/numpy/numpy/pull/14433>`__: BUG: Fix uint-overflow if padding with linear_ramp and negative...
+* `#14436 <https://github.com/numpy/numpy/pull/14436>`__: BUG: Update 1.17.x with 1.18.0-dev pocketfft.py.
+* `#14446 <https://github.com/numpy/numpy/pull/14446>`__: REL: Prepare for NumPy 1.17.2 release.
diff --git a/doc/neps/index.rst.tmpl b/doc/neps/index.rst.tmpl
index 0ad8e0f80..4c5b7766f 100644
--- a/doc/neps/index.rst.tmpl
+++ b/doc/neps/index.rst.tmpl
@@ -23,7 +23,7 @@ Meta-NEPs (NEPs about NEPs or Processes)
.. toctree::
:maxdepth: 1
-{% for nep, tags in neps.items() if tags['Type'] == 'Process' %}
+{% for nep, tags in neps.items() if tags['Status'] == 'Active' %}
{{ tags['Title'] }} <{{ tags['Filename'] }}>
{% endfor %}
diff --git a/doc/neps/nep-0000.rst b/doc/neps/nep-0000.rst
index 89ba177cb..97b69279b 100644
--- a/doc/neps/nep-0000.rst
+++ b/doc/neps/nep-0000.rst
@@ -138,7 +138,7 @@ accepted that a competing proposal is a better alternative.
When a NEP is ``Accepted``, ``Rejected``, or ``Withdrawn``, the NEP should be
updated accordingly. In addition to updating the status field, at the very
least the ``Resolution`` header should be added with a link to the relevant
-post in the mailing list archives.
+thread in the mailing list archives.
NEPs can also be ``Superseded`` by a different NEP, rendering the
original obsolete. The ``Replaced-By`` and ``Replaces`` headers
diff --git a/doc/neps/nep-0019-rng-policy.rst b/doc/neps/nep-0019-rng-policy.rst
index aa5fdc653..9704b24ca 100644
--- a/doc/neps/nep-0019-rng-policy.rst
+++ b/doc/neps/nep-0019-rng-policy.rst
@@ -7,7 +7,7 @@ NEP 19 — Random Number Generator Policy
:Type: Standards Track
:Created: 2018-05-24
:Updated: 2019-05-21
-:Resolution: https://mail.python.org/pipermail/numpy-discussion/2018-June/078126.html
+:Resolution: https://mail.python.org/pipermail/numpy-discussion/2018-July/078380.html
Abstract
--------
diff --git a/doc/neps/nep-0021-advanced-indexing.rst b/doc/neps/nep-0021-advanced-indexing.rst
index 5acabbf16..dab9ab022 100644
--- a/doc/neps/nep-0021-advanced-indexing.rst
+++ b/doc/neps/nep-0021-advanced-indexing.rst
@@ -630,7 +630,7 @@ At this point we have left the straight forward world of ``oindex`` but can
do random picking of any element from the array. Note that in the last example
a method such as mentioned in the ``Related Questions`` section could be more
straight forward. But this approach is even more flexible, since ``rows``
-does not have to be a simple ``arange``, but could be ``intersting_times``::
+does not have to be a simple ``arange``, but could be ``interesting_times``::
>>> interesting_times = np.array([0, 4, 8, 9, 10])
>>> correct_sensors_at_it = correct_sensors[interesting_times, :]
diff --git a/doc/neps/nep-0024-missing-data-2.rst b/doc/neps/nep-0024-missing-data-2.rst
index c8b19561f..f4414e0a0 100644
--- a/doc/neps/nep-0024-missing-data-2.rst
+++ b/doc/neps/nep-0024-missing-data-2.rst
@@ -28,7 +28,7 @@ Detailed description
Rationale
^^^^^^^^^
-The purpose of this aNEP is to define two interfaces -- one for handling
+The purpose of this NEP is to define two interfaces -- one for handling
'missing values', and one for handling 'masked arrays'.
An ordinary value is something like an integer or a floating point number. A
diff --git a/doc/neps/nep-0028-website-redesign.rst b/doc/neps/nep-0028-website-redesign.rst
new file mode 100644
index 000000000..b418ca831
--- /dev/null
+++ b/doc/neps/nep-0028-website-redesign.rst
@@ -0,0 +1,334 @@
+===================================
+NEP 28 — numpy.org website redesign
+===================================
+
+:Author: Ralf Gommers <ralf.gommers@gmail.com>
+:Author: Joe LaChance <joe@boldmetrics.com>
+:Author: Shekhar Rajak <shekharrajak.1994@gmail.com>
+:Status: Accepted
+:Type: Informational
+:Created: 2019-07-16
+:Resolution: https://mail.python.org/pipermail/numpy-discussion/2019-August/079889.html
+
+
+Abstract
+--------
+
+NumPy is the fundamental library for numerical and scientific computing with
+Python. It is used by millions and has a large team of maintainers and
+contributors. Despite that, its `numpy.org <http://numpy.org>`_ website has
+never received the attention it needed and deserved. We hope and intend to
+change that soon. This document describes ideas and requirements for how to
+design a replacement for the current website, to better serve the needs of
+our diverse community.
+
+At a high level, what we're aiming for is:
+
+- a modern, clean look
+- an easy to deploy static site
+- a structure that's easy to navigate
+- content that addresses all types of stakeholders
+- possible multilingual translations / i18n
+
+This website serves a couple of roles:
+
+- it's the entry point to the project for new users
+- it should link to the documentation (which is hosted separately, now on
+ http://docs.scipy.org/ and in the near future on http://numpy.org/doc).
+- it should address various aspects of the project (e.g. what NumPy is and
+ why you'd want to use it, community, project organization, funding,
+ relationship with NumFOCUS and possibly other organizations)
+- it should link out to other places, so every type of stakeholder
+ (beginning and advanced user, educators, packagers, funders, etc.)
+ can find their way
+
+
+Motivation and Scope
+--------------------
+
+The current numpy.org website has almost no content and its design is poor.
+This affects many users, who come there looking for information. It also
+affects many other aspects of the NumPy project, from finding new contributors
+to fundraising.
+
+The scope of the proposed redesign is the top-level numpy.org site, which
+now contains only a couple of pages and may contain on the order of ten
+pages after the redesign. Changing the documentation (user guide, reference
+guide, and some other pages in the NumPy Manual) is out of scope for
+this proposal.
+
+
+Detailed description
+--------------------
+
+User Experience
+~~~~~~~~~~~~~~~
+
+Besides the NumPy logo, there is little that can or needs to be kept from the
+current website. We will rely to a large extent on ideas and proposals by the
+designer(s) of the new website.
+
+As reference points we can use the `Jupyter website <https://jupyter.org/>`_,
+which is probably the best designed site in our ecosystem, and the
+`QuantEcon <https://quantecon.org>`_ and `Julia <https://julialang.org>`_
+sites which are well-designed too.
+
+The Website
+~~~~~~~~~~~
+
+A static site is a must. There are many high-quality static site generators.
+The current website uses Sphinx, however that is not the best choice - it's
+hard to theme and results in sites that are too text-heavy due to Sphinx'
+primary aim being documentation.
+
+The following should be considered when choosing a static site generator:
+
+1. *How widely used is it?* This is important when looking for help maintaining
+ or improving the site. More popular frameworks are usually also better
+ maintained, so less chance of bugs or obsolescence.
+2. *Ease of deployment.* Most generators meet this criterion, however things
+ like built-in support for GitHub Pages helps.
+3. *Preferences of who implements the new site.* Everyone has their own
+ preferences. And it's a significant amount of work to build a new site.
+ So we should take the opinion of those doing the work into account.
+
+Traffic
+```````
+
+The current site receives on the order of 500,000 unique visitors per month.
+With a redesigned site and relevant content, there is potential for visitor
+counts to reach 5-6 million -- a similar level as
+`scipy.org <http://scipy.org>`_ or `matplotlib.org <http://matplotlib.org>`_ --
+or more.
+
+Possible options for static site generators
+```````````````````````````````````````````
+
+1. *Jekyll.* This is a well maintained option with 855 Github contributors,
+ with contributions within the last month. Jekyll is written in Ruby, and
+ has a simple CLI interface. Jekyll also has a large directory of
+ `themes <https://jekyllthemes.io>`__, although a majority cost money.
+ There are several themes (`serif <https://jekyllthemes.io/theme/serif>`_,
+ `uBuild <https://jekyllthemes.io/theme/ubuild-jekyll-theme>`_,
+ `Just The Docs <https://jekyllthemes.io/theme/just-the-docs>`_) that are
+ appropriate and free. Most themes are likely responsive for mobile, and
+ that should be a requirement. Jekyll uses a combination of liquid templating
+ and YAML to render HTML, and content is written in Markdown. i18n
+ functionality is not native to Jekyll, but can be added easily.
+ One nice benefit of Jekyll is that it can be run automatically by GitHub
+ Pages, so deployment via a CI system doesn't need to be implemented.
+2. *Hugo.* This is another well maintained option with 554 contributors, with
+ contributions within the last month. Hugo is written in Go, and similar to
+ Jekyll, has a simple to use CLI interface to generate static sites. Again,
+ similar to Jekyll, Hugo has a large directory of
+ `themes <https://themes.gohugo.io>`_. These themes appear to be free,
+ unlike some of Jekyll's themes.
+ (`Sample landing page theme <https://themes.gohugo.io/hugo-hero-theme>`_,
+ `docs theme <https://themes.gohugo.io/hugo-whisper-theme>`_). Hugo uses Jade
+ as its templating language, and content is also written in Markdown. i18n
+ functionality is native to Hugo.
+3. *Docusaurus.* Docusaurus is a responsive static site generator made by Facebook.
+ Unlike the previous options, Docusaurus doesn't come with themes, and thus we
+ would not want to use this for our landing page. This is an excellent docs
+ option written in React. Docusaurus natively has support for i18n (via
+ Crowdin_), document versioning, and document search.
+
+Both Jekyll and Hugo are excellent options that should be supported into the
+future and are good choices for NumPy. Docusaurus has several bonus features
+such as versioning and search that Jekyll and Hugo don't have, but is likely
+a poor candidate for a landing page - it could be a good option for a
+high-level docs site later on though.
+
+Deployment
+~~~~~~~~~~
+
+There is no need for running a server, and doing so is in our experience a
+significant drain on the time of maintainers.
+
+1. *Netlify.* Using netlify is free until 100GB of bandwidth is used. Additional
+ bandwidth costs $20/100GB. They support a global CDN system, which will keep
+ load times quick for users in other regions. Netlify also has Github integration,
+ which will allow for easy deployment. When a pull request is merged, Netlify
+ will automatically deploy the changes. DNS is simple, and HTTPS is also supported.
+2. *Github Pages.* Github Pages also has a 100GB bandwidth limit, and it is unclear if
+ additional bandwidth can be purchased. It is also unclear where sites are deployed,
+ and it should be assumed sites aren't deployed globally. Github Pages has an easy to
+ use CI & DNS, similar to Netlify. HTTPS is supported.
+3. *Cloudflare.* An excellent option, additional CI is likely needed for the same
+ ease of deployment.
+
+All of the above options are appropriate for the NumPy site based on current
+traffic. Updating to a new deployment strategy, if needed, is a minor amount of
+work compared to developing the website itself. If a provider such as
+Cloudflare is chosen, additional CI may be required, such as CircleCI, to
+have a similar deployment to GitHub Pages or Netlify.
+
+Analytics
+~~~~~~~~~
+
+It's beneficial to maintainers to know how many visitors are coming to
+numpy.org. Google Analytics offers visitor counts and locations. This will
+help to support and deploy more strategically, and help maintainers
+understand where traffic is coming from.
+
+Google Analytics is free. A script, provided by Google, must be added to the home page.
+
+Website Structure
+~~~~~~~~~~~~~~~~~
+
+We aim to keep the first version of the new website small in terms of amount
+of content. New pages can be added later on, it's more important right now to
+get the site design right and get some essential information up. Note that in
+the second half of 2019 we expect to get 1 or 2 tech writers involved in the
+project via Google Season of Docs. They will likely help improve the content
+and organization of that content.
+
+We propose the following structure:
+
+0. Front page: essentials of what NumPy is (compare e.g. jupyter.org), one or
+ a couple key user stories (compare e.g. julialang.org)
+1. Install
+2. Documentation
+3. Array computing
+4. Community
+5. Learning
+6. About Us
+7. Contribute
+8. Donate
+
+There may be a few other pages, e.g. a page on performance, that are linked
+from one of the main pages.
+
+Stakeholder Content
+~~~~~~~~~~~~~~~~~~~
+
+This should have as little content as possible *within the site*. Somewhere
+on the site we should link out to content that's specific to:
+
+- beginning users (quickstart, tutorial)
+- advanced users
+- educators
+- packagers
+- package authors that depend on NumPy
+- funders (governance, roadmap)
+
+Translation (multilingual / i18n)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+NumPy has users all over the world. Most of those users are not native
+English speakers, and many don't speak English well or at all. Therefore
+having content in multiple languages is potentially addressing a large unmet
+need. It would likely also help make the NumPy project more diverse and
+welcoming.
+
+On the other hand, there are good reasons why few projects have a
+multi-lingual site. It's potentially a lot of extra work. Extra work for
+maintainers is costly - they're already struggling to keep up with the work
+load. Therefore we have to very carefully consider whether a multi-lingual
+site is feasible and weigh costs and benefits.
+
+We start with an assertion: maintaining translations of all documentation, or
+even the whole user guide, as part of the NumPy project is not feasible. One
+simply has to look at the volume of our documentation and the frequency with
+which we change it to realize that that's the case. Perhaps it will be
+feasible though to translate just the top-level pages of the website. Those
+do not change very often, and it will be a limited amount of content (order
+of magnitude 5-10 pages of text).
+
+We propose the following requirements for adding a language:
+
+- The language must have a dedicated maintainer
+- There must be a way to validate content changes (e.g. a second
+ maintainer/reviewer, or high quality language support in a freely
+ available machine translation tool)
+- The language must have a reasonable size target audience (to be
+ assessed by the NumPy maintainers)
+
+Furthermore we propose a policy for when to remove support for a language again
+(preferably by hiding it rather than deleting content). This may be done when
+the language no longer has a maintainer, and coverage of translations falls
+below an acceptable threshold (say 80%).
+
+Benefits of having translations include:
+
+- Better serve many existing and potential users
+- Potentially attract a culturally and geographically more diverse set of contributors
+
+The tradeoffs are:
+
+- Cost of maintaining a more complex code base
+- Cost of making decisions about whether or not to add a new language
+- Higher cost to making content changes, creates work for language maintainers
+- Any content change should be rolled out with enough delay to have translations in place
+
+Can we define a small enough set of pages and content that it makes sense to do this?
+Probably yes.
+
+Is there an easy to use tool to maintain translations and add them to the website?
+To be discussed - it needs investigating, and may depend on the choice of static site
+generator. One potential option is Crowdin_, which is free for open source projects.
+
+
+Style and graphic design
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+Beyond the "a modern, clean look" goal we choose to not specify too much. A
+designer may have much better ideas than the authors of this proposal, hence we
+will work with the designer(s) during the implementation phase.
+
+The NumPy logo could use a touch-up. The logo is widely recognized and its colors and
+design are good, however the look-and-feel is perhaps a little dated.
+
+
+Other aspects
+~~~~~~~~~~~~~
+
+A search box would be nice to have. The Sphinx documentation already has a
+search box, however a search box on the main site which provides search results
+for the docs, the website, and perhaps other domains that are relevant for
+NumPy would make sense.
+
+
+Backward compatibility
+----------------------
+
+Given a static site generator is chosen, we will migrate away from Sphinx for
+numpy.org (the website, *not including the docs*). The current deployment can
+be preserved until a future deprecation date is decided (potentially based on
+the comfort level of our new site).
+
+All site generators listed above have visibility into the HTML and Javascript
+that is generated, and can continue to be maintained in the event a given
+project ceases to be maintained.
+
+
+Alternatives
+------------
+
+Alternatives we considered for the overall design of the website:
+
+1. *Update current site.* A new Sphinx theme could be chosen. This would likely
+ take the least amount of resources initially, however, Sphinx does not have
+ the features we are looking for moving forward such as i18n, responsive design,
+ and a clean, modern look.
+ Note that updating the docs Sphinx theme is likely still a good idea - it's
+ orthogonal to this NEP though.
+2. *Create custom site.* This would take the most amount of resources, and is
+ likely to have additional benefit in comparison to a static site generator.
+ All features would be able to be added at the cost of developer time.
+
+
+Discussion
+----------
+
+Mailing list thread discussing this NEP: TODO
+
+
+References and Footnotes
+------------------------
+.. _Crowdin: https://crowdin.com/pricing#annual
+
+Copyright
+---------
+
+This document has been placed in the public domain.
diff --git a/doc/neps/nep-0029-deprecation_policy.rst b/doc/neps/nep-0029-deprecation_policy.rst
new file mode 100644
index 000000000..5451327b3
--- /dev/null
+++ b/doc/neps/nep-0029-deprecation_policy.rst
@@ -0,0 +1,314 @@
+==================================================================================
+NEP 29 — Recommend Python and NumPy version support as a community policy standard
+==================================================================================
+
+
+:Author: Thomas A Caswell <tcaswell@gmail.com>, Andreas Mueller, Brian Granger, Madicken Munk, Ralf Gommers, Matt Haberland <mhaberla@calpoly.edu>, Matthias Bussonnier <bussonniermatthias@gmail.com>, Stefan van der Walt
+:Status: Draft
+:Type: Informational Track
+:Created: 2019-07-13
+
+
+Abstract
+--------
+
+This NEP recommends and encourages all projects across the Scientific
+Python ecosystem to adopt a common "time window-based" policy for
+support of Python and NumPy versions. Standardizing a recommendation
+for project support of minimum Python and NumPy versions will improve
+downstream project planning.
+
+This is an unusual NEP in that it offers recommendations for
+community-wide policy and not for changes to NumPy itself. Since a
+common place for SPEEPs (Scientific Python Ecosystem Enhancement
+Proposals) does not exist and given NumPy's central role in the
+ecosystem, a NEP provides a visible place to document the proposed
+policy.
+
+This NEP is being put forward by maintainers of Matplotlib, scikit-learn,
+IPython, Jupyter, yt, SciPy, NumPy, and scikit-image.
+
+
+
+Detailed description
+--------------------
+
+For the purposes of this NEP we assume semantic versioning and define:
+
+*major version*
+ A release that changes the first number (e.g. X.0.0)
+
+*minor version*
+ A release that changes the second number (e.g. x.Y.0)
+
+*patch version*
+ A release that changes the third number (e.g. x.y.Z)
+
+
+When a project creates a new major or minor version, we recommend that
+the project should support at least all minor versions of Python
+introduced and released in the prior 42 months ~~from their
+anticipated release date~~ with a minimum of 2 minor versions of
+Python, and all minor versions of NumPy released in the prior 24
+months ~~from their anticipated release date~~ with a minimum of 3
+minor versions of NumPy.
+
+
+The diagram::
+
+ Jan 16 Jan 17 Jan 18 Jan 19 Jan 20
+ | | | | |
+ +++++|+++++++++++|+++++++++++|+++++++++++|+++++++++++|++++++++++++
+ | | | |
+ py 3.5.0 py 3.6.0 py 3.7.0 py 3.8.0
+ |-----------------------------------------> Feb19
+ |-----------------------------------------> Dec19
+ |-----------------------------------------> Nov20
+
+shows the 42 month support windows for Python. A project with a
+major or minor version release in Feb19 should support py35 and newer,
+a project with a major or minor version release in Dec19 should
+support py36 and newer, and a project with a major or minor version
+release in Nov20 should support py37 and newer.
+
+The current Python release cadence is 18 months so a 42 month window
+ensures that there will always be at least two minor versions of Python
+in the window. By padding the window by 6 months from the anticipated
+Python cadence we avoid the edge cases where a project releases
+the month after Python and would effectively only support one
+minor version of Python that has an installed base.
+This six month buffer provides resilience to minor fluctuations /
+delays in the Python release schedule.
+
+Because Python minor version support is based on historical release
+dates, a 42 month time window, and a project's plans, a project can
+decide to drop a given minor version of Python very early in the release
+process.
+
+There will be some unavoidable mismatch in supported versions of
+Python between projects if a release occurs immediately after a
+minor version of Python ages out. However, this should not last longer than one
+release cycle of each of the projects, and when a given project does a
+minor or major release, it is guaranteed that there will be a stable
+release of all other projects that support the set of Python the
+new release will support.
+
+If there is a Python 4 or a NumPy 2 this policy will have to be
+reviewed in light of the community's and projects' best interests.
+
+
+Support Table
+~~~~~~~~~~~~~
+
+============ ====== =====
+Date Python NumPy
+------------ ------ -----
+Jan 16, 2019 3.5+ 1.13+
+Mar 14, 2019 3.6+ 1.13+
+Jun 08, 2019 3.6+ 1.14+
+Jan 07, 2020 3.6+ 1.15+
+Jun 23, 2020 3.7+ 1.15+
+Jul 23, 2020 3.7+ 1.16+
+Jan 13, 2021 3.7+ 1.17+
+Jul 26, 2021 3.7+ 1.18+
+Dec 26, 2021 3.8+ 1.18+
+============ ====== =====
+
+
+Drop Schedule
+~~~~~~~~~~~~~
+
+::
+
+ On Jan 16, 2019 drop support for Numpy 1.12 (initially released on Jan 15, 2017)
+ On Mar 14, 2019 drop support for Python 3.5 (initially released on Sep 13, 2015)
+ On Jun 08, 2019 drop support for Numpy 1.13 (initially released on Jun 07, 2017)
+ On Jan 07, 2020 drop support for Numpy 1.14 (initially released on Jan 06, 2018)
+ On Jun 23, 2020 drop support for Python 3.6 (initially released on Dec 23, 2016)
+ On Jul 23, 2020 drop support for Numpy 1.15 (initially released on Jul 23, 2018)
+ On Jan 13, 2021 drop support for Numpy 1.16 (initially released on Jan 13, 2019)
+ On Jul 26, 2021 drop support for Numpy 1.17 (initially released on Jul 26, 2019)
+ On Dec 26, 2021 drop support for Python 3.7 (initially released on Jun 27, 2018)
+
+
+Implementation
+--------------
+
+We suggest that all projects adopt the following language into their
+development guidelines:
+
+
+ - This project supports at least the minor versions of Python
+ initially released 42 months prior to a planned project release
+ date.
+ - The project will always support at least the 2 latest minor
+ versions of Python.
+ - The project will support minor versions of ``numpy`` initially
+ released in the 24 months prior to a planned project release date
+ or the oldest version that supports the minimum Python version
+ (whichever is higher).
+ - The project will always support at least the 3 latest minor
+ versions of NumPy.
+
+ The minimum supported version of Python will be set to
+ ``python_requires`` in ``setup``. All supported minor versions of
+ Python will be in the test matrix and have binary artifacts built
+ for releases.
+
+ The project should adjust upward the minimum Python and NumPy
+ version support on every minor and major release, but never on a
+ patch release.
+
+
+Backward compatibility
+----------------------
+
+No backward compatibility issues.
+
+Alternatives
+------------
+
+Ad-Hoc version support
+~~~~~~~~~~~~~~~~~~~~~~
+
+A project could on every release evaluate whether to increase
+the minimum version of Python supported.
+As a major downside, an ad-hoc approach makes it hard for downstream users to predict what
+the future minimum versions will be. As there is no objective threshold
+to when the minimum version should be dropped, it is easy for these
+version support discussions to devolve into `bike shedding <https://en.wikipedia.org/wiki/Wikipedia:Avoid_Parkinson%27s_bicycle-shed_effect>`_ and acrimony.
+
+
+All CPython supported versions
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The CPython supported versions of Python are listed in the Python
+Developers Guide and the Python PEPs. Supporting these is a very
+clear and conservative approach. However, it means that there is a 4
+year lag between when new language features come into the language and
+when the projects are able to use them. Additionally, for projects
+that have a significant component of compiled extensions this requires
+building many binary artifacts for each release.
+
+For the case of NumPy, many projects carry workarounds to bugs that
+are fixed in subsequent versions of NumPy. Being proactive about
+increasing the minimum version of NumPy will allow downstream
+packages to carry fewer version-specific patches.
+
+
+
+Default version on Linux distribution
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The policy could be to support the version of Python that ships by
+default in the latest Ubuntu LTS or CentOS/RHEL release. However, we
+would still have to standardize across the community which
+distribution we are following.
+
+By following the versions supported by major Linux distributions, we
+are giving up technical control of our projects to external
+organizations that may have different motivations and concerns than we
+do.
+
+N minor versions of Python
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Given the current release cadence of the Python, the proposed time (42
+months) is roughly equivalent to "the last two" Python minor versions.
+However, if Python changes their release cadence substantially, any
+rule based solely on the number of minor releases may need to be
+changed to remain sensible.
+
+A more fundamental problem with a policy based on number of Python
+releases is that it is hard to predict when support for a given minor
+version of Python will be dropped as that requires correctly
+predicting the release schedule of Python for the next 3-4 years. A
+time-based rule only depends on things that have already happened
+and the length of the support window.
+
+
+
+
+Time window from the X.Y.1 Python release
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This is equivalent to a few month longer support window from the X.Y.0
+release. This is because X.Y.1 bug-fix release is typically a few
+months after the X.Y.0 release, thus a N month window from X.Y.1 is
+roughly equivalent to a N+3 month from X.Y.0.
+
+The X.Y.0 release is naturally a special release. If we were to
+anchor the window on X.Y.1 we would then have the discussion of why
+not X.Y.M?
+
+
+Discussion
+----------
+
+
+References and Footnotes
+------------------------
+
+Code to generate support and drop schedule tables ::
+
+ from datetime import datetime, timedelta
+
+ data = """Jan 15, 2017: Numpy 1.12
+ Sep 13, 2015: Python 3.5
+ Jun 27, 2018: Python 3.7
+ Dec 23, 2016: Python 3.6
+ Jun 07, 2017: Numpy 1.13
+ Jan 06, 2018: Numpy 1.14
+ Jul 23, 2018: Numpy 1.15
+ Jan 13, 2019: Numpy 1.16
+ Jul 26, 2019: Numpy 1.17
+ """
+
+ releases = []
+
+ plus42 = timedelta(days=int(365*3.5 + 1))
+ plus24 = timedelta(days=int(365*2 + 1))
+
+ for line in data.splitlines():
+ date, project_version = line.split(':')
+ project, version = project_version.strip().split(' ')
+ release = datetime.strptime(date, '%b %d, %Y')
+ if project.lower() == 'numpy':
+ drop = release + plus24
+ else:
+ drop = release + plus42
+ releases.append((drop, project, version, release))
+
+ releases = sorted(releases, key=lambda x: x[0])
+
+ minpy = '3.8+'
+ minnum = '1.18+'
+
+ toprint_drop_dates = ['']
+ toprint_support_table = []
+ for d, p, v, r in releases[::-1]:
+ df = d.strftime('%b %d, %Y')
+ toprint_drop_dates.append(
+ f'On {df} drop support for {p} {v} '
+ f'(initially released on {r.strftime("%b %d, %Y")})')
+ toprint_support_table.append(f'{df} {minpy:<6} {minnum:<5}')
+ if p.lower() == 'numpy':
+ minnum = v+'+'
+ else:
+ minpy = v+'+'
+
+ for e in toprint_drop_dates[::-1]:
+ print(e)
+
+ print('============ ====== =====')
+ print('Date Python NumPy')
+ print('------------ ------ -----')
+ for e in toprint_support_table[::-1]:
+ print(e)
+ print('============ ====== =====')
+
+
+Copyright
+---------
+
+This document has been placed in the public domain.
diff --git a/doc/neps/nep-0030-duck-array-protocol.rst b/doc/neps/nep-0030-duck-array-protocol.rst
new file mode 100644
index 000000000..353c5df1e
--- /dev/null
+++ b/doc/neps/nep-0030-duck-array-protocol.rst
@@ -0,0 +1,183 @@
+======================================================
+NEP 30 — Duck Typing for NumPy Arrays - Implementation
+======================================================
+
+:Author: Peter Andreas Entschev <pentschev@nvidia.com>
+:Author: Stephan Hoyer <shoyer@google.com>
+:Status: Draft
+:Type: Standards Track
+:Created: 2019-07-31
+:Updated: 2019-07-31
+:Resolution:
+
+Abstract
+--------
+
+We propose the ``__duckarray__`` protocol, following the high-level overview
+described in NEP 22, allowing downstream libraries to return arrays of their
+defined types, in contrast to ``np.asarray``, that coerces those ``array_like``
+objects to NumPy arrays.
+
+Detailed description
+--------------------
+
+NumPy's API, including array definitions, is implemented and mimicked in
+countless other projects. By definition, many of those arrays are fairly
+similar in how they operate to the NumPy standard. The introduction of
+``__array_function__`` allowed dispatching of functions implemented by several
+of these projects directly via NumPy's API. This introduces a new requirement,
+returning the NumPy-like array itself, rather than forcing a coercion into a
+pure NumPy array.
+
+For the purpose above, NEP 22 introduced the concept of duck typing to NumPy
+arrays. The suggested solution described in the NEP allows libraries to avoid
+coercion of a NumPy-like array to a pure NumPy array where necessary, while
+still allowing that NumPy-like array libraries that do not wish to implement
+the protocol to coerce arrays to a pure NumPy array via ``np.asarray``.
+
+Usage Guidance
+~~~~~~~~~~~~~~
+
+Code that uses np.duckarray is meant for supporting other ndarray-like objects
+that "follow the NumPy API". That is an ill-defined concept at the moment --
+every known library implements the NumPy API only partly, and many deviate
+intentionally in at least some minor ways. This cannot be easily remedied, so
+for users of ``__duckarray__`` we recommend the following strategy: check if the
+NumPy functionality used by the code that follows your use of ``__duckarray__``
+is present in Dask, CuPy and Sparse. If so, it's reasonable to expect any duck
+array to work here. If not, we suggest you indicate in your docstring what kinds
+of duck arrays are accepted, or what properties they need to have.
+
+To exemplify the usage of duck arrays, suppose one wants to take the ``mean()``
+of an array-like object ``arr``. Using NumPy to achieve that, one could write
+``np.asarray(arr).mean()`` to achieve the intended result. However, libraries
+may expect ``arr`` to be a NumPy-like array, and at the same time, the array may
+or may not be an object compliant to the NumPy API (either in full or partially)
+such as a CuPy, Sparse or a Dask array. In the case where ``arr`` is already an
+object compliant to the NumPy API, we would simply return it (and prevent it
+from being coerced into a pure NumPy array), otherwise, it would then be coerced
+into a NumPy array.
+
+Implementation
+--------------
+
+The implementation idea is fairly straightforward, requiring a new function
+``duckarray`` to be introduced in NumPy, and a new method ``__duckarray__`` in
+NumPy-like array classes. The new ``__duckarray__`` method shall return the
+downstream array-like object itself, such as the ``self`` object. If appropriate,
+an ``__array__`` method may be implemented that returns a NumPy array or possibly
+raise a ``TypeError`` with a helpful message.
+
+The new NumPy ``duckarray`` function can be implemented as follows:
+
+.. code:: python
+
+ def duckarray(array_like):
+ if hasattr(array_like, '__duckarray__'):
+ return array_like.__duckarray__()
+ return np.asarray(array_like)
+
+Example for a project implementing NumPy-like arrays
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Now consider a library that implements a NumPy-compatible array class called
+``NumPyLikeArray``, this class shall implement the methods described above, and
+a complete implementation would look like the following:
+
+.. code:: python
+
+ class NumPyLikeArray:
+ def __duckarray__(self):
+ return self
+
+ def __array__(self):
+ raise TypeError("NumPyLikeArray can not be converted to a numpy array. "
+ "You may want to use np.duckarray.")
+
+The implementation above exemplifies the simplest case, but the overall idea
+is that libraries will implement a ``__duckarray__`` method that returns the
+original object, and an ``__array__`` method that either creates and returns an
+appropriate NumPy array, or raises a ``TypeError`` to prevent unintentional use
+as an object in a NumPy array (if ``np.asarray`` is called on an arbitrary
+object that does not implement ``__array__``, it will create a NumPy array
+scalar).
+
+In case of existing libraries that don't already implement ``__array__`` but
+would like to use duck array typing, it is advised that they introduce
+both ``__array__`` and ``__duckarray__`` methods.
+
+Usage
+-----
+
+An example of how the ``__duckarray__`` protocol could be used to write a
+``stack`` function based on ``concatenate``, and its produced outcome, can be
+seen below. The example here was chosen not only to demonstrate the usage of
+the ``duckarray`` function, but also to demonstrate its dependency on the NumPy
+API, demonstrated by checks on the array's ``shape`` attribute. Note that the
+example is merely a simplified version of NumPy's actual implementation of
+``stack`` working on the first axis, and it is assumed that Dask has implemented
+the ``__duckarray__`` method.
+
+.. code:: python
+
+ def duckarray_stack(arrays):
+ arrays = [np.duckarray(arr) for arr in arrays]
+
+ shapes = {arr.shape for arr in arrays}
+ if len(shapes) != 1:
+ raise ValueError('all input arrays must have the same shape')
+
+ expanded_arrays = [arr[np.newaxis, ...] for arr in arrays]
+ return np.concatenate(expanded_arrays, axis=0)
+
+ dask_arr = dask.array.arange(10)
+ np_arr = np.arange(10)
+ np_like = list(range(10))
+
+ duckarray_stack((dask_arr, dask_arr)) # Returns dask.array
+ duckarray_stack((dask_arr, np_arr)) # Returns dask.array
+ duckarray_stack((dask_arr, np_like)) # Returns dask.array
+
+In contrast, using only ``np.asarray`` (at the time of writing of this NEP, this
+is the usual method employed by library developers to ensure arrays are
+NumPy-like) has a different outcome:
+
+.. code:: python
+
+ def asarray_stack(arrays):
+ arrays = [np.asarray(arr) for arr in arrays]
+
+ # The remaining implementation is the same as that of
+ # ``duckarray_stack`` above
+
+ asarray_stack((dask_arr, dask_arr)) # Returns np.ndarray
+ asarray_stack((dask_arr, np_arr)) # Returns np.ndarray
+ asarray_stack((dask_arr, np_like)) # Returns np.ndarray
+
+Backward compatibility
+----------------------
+
+This proposal does not raise any backward compatibility issues within NumPy,
+given that it only introduces a new function. However, downstream libraries
+that opt to introduce the ``__duckarray__`` protocol may choose to remove the
+ability of coercing arrays back to a NumPy array via ``np.array`` or
+``np.asarray`` functions, preventing unintended effects of coercion of such
+arrays back to a pure NumPy array (as some libraries already do, such as CuPy
+and Sparse), but still leaving libraries not implementing the protocol with the
+choice of utilizing ``np.duckarray`` to promote ``array_like`` objects to pure
+NumPy arrays.
+
+Previous proposals and discussion
+---------------------------------
+
+The duck typing protocol proposed here was described in a high level in
+`NEP 22 <https://numpy.org/neps/nep-0022-ndarray-duck-typing-overview.html>`_.
+
+Additionally, longer discussions about the protocol and related proposals
+took place in
+`numpy/numpy #13831 <https://github.com/numpy/numpy/issues/13831>`_
+
+Copyright
+---------
+
+This document has been placed in the public domain.
diff --git a/doc/neps/nep-0032-remove-financial-functions.rst b/doc/neps/nep-0032-remove-financial-functions.rst
new file mode 100644
index 000000000..a78b11fea
--- /dev/null
+++ b/doc/neps/nep-0032-remove-financial-functions.rst
@@ -0,0 +1,214 @@
+==================================================
+NEP 32 — Remove the financial functions from NumPy
+==================================================
+
+:Author: Warren Weckesser <warren.weckesser@gmail.com>
+:Status: Accepted
+:Type: Standards Track
+:Created: 2019-08-30
+:Resolution: https://mail.python.org/pipermail/numpy-discussion/2019-September/080074.html
+
+
+Abstract
+--------
+
+We propose deprecating and ultimately removing the financial functions [1]_
+from NumPy. The functions will be moved to an independent repository,
+and provided to the community as a separate package with the name
+``numpy_financial``.
+
+
+Motivation and scope
+--------------------
+
+The NumPy financial functions [1]_ are the 10 functions ``fv``, ``ipmt``,
+``irr``, ``mirr``, ``nper``, ``npv``, ``pmt``, ``ppmt``, ``pv`` and ``rate``.
+The functions provide elementary financial calculations such as future value,
+net present value, etc. These functions were added to NumPy in 2008 [2]_.
+
+In May, 2009, a request by Joe Harrington to add a function called ``xirr`` to
+the financial functions triggered a long thread about these functions [3]_.
+One important point that came up in that thread is that a "real" financial
+library must be able to handle real dates. The NumPy financial functions do
+not work with actual dates or calendars. The preference for a more capable
+library independent of NumPy was expressed several times in that thread.
+
+In June, 2009, D. L. Goldsmith expressed concerns about the correctness of the
+implementations of some of the financial functions [4]_. It was suggested then
+to move the financial functions out of NumPy to an independent package.
+
+In a GitHub issue in 2013 [5]_, Nathaniel Smith suggested moving the financial
+functions from the top-level namespace to ``numpy.financial``. He also
+suggested giving the functions better names. Responses at that time included
+the suggestion to deprecate them and move them from NumPy to a separate
+package. This issue is still open.
+
+Later in 2013 [6]_, it was suggested on the mailing list that these functions
+be removed from NumPy.
+
+The arguments for the removal of these functions from NumPy:
+
+* They are too specialized for NumPy.
+* They are not actually useful for "real world" financial calculations, because
+ they do not handle real dates and calendars.
+* The definition of "correctness" for some of these functions seems to be a
+ matter of convention, and the current NumPy developers do not have the
+ background to judge their correctness.
+* There has been little interest among past and present NumPy developers
+ in maintaining these functions.
+
+The main arguments for keeping the functions in NumPy are:
+
+* Removing these functions will be disruptive for some users. Current users
+ will have to add the new ``numpy_financial`` package to their dependencies,
+ and then modify their code to use the new package.
+* The functions provided, while not "industrial strength", are apparently
+ similar to functions provided by spreadsheets and some calculators. Having
+ them available in NumPy makes it easier for some developers to migrate their
+ software to Python and NumPy.
+
+It is clear from comments in the mailing list discussions and in the GitHub
+issues that many current NumPy developers believe the benefits of removing
+the functions outweigh the costs. For example, from [5]_::
+
+ The financial functions should probably be part of a separate package
+ -- Charles Harris
+
+ If there's a better package we can point people to we could just deprecate
+ them and then remove them entirely... I'd be fine with that too...
+ -- Nathaniel Smith
+
+ +1 to deprecate them. If no other package exists, it can be created if
+ someone feels the need for that.
+ -- Ralf Gommers
+
+ I feel pretty strongly that we should deprecate these. If nobody on numpy’s
+ core team is interested in maintaining them, then it is purely a drag on
+ development for NumPy.
+ -- Stephan Hoyer
+
+And from the 2013 mailing list discussion, about removing the functions from
+NumPy::
+
+ I am +1 as well, I don't think they should have been included in the first
+ place.
+ -- David Cournapeau
+
+But not everyone was in favor of removal::
+
+ The fin routines are tiny and don't require much maintenance once
+ written. If we made an effort (putting up pages with examples of common
+ financial calculations and collecting those under a topical web page,
+ then linking to that page from various places and talking it up), I
+ would think they could attract users looking for a free way to play with
+ financial scenarios. [...]
+ So, I would say we keep them. If ours are not the best, we should bring
+ them up to snuff.
+ -- Joe Harrington
+
+For an idea of the maintenance burden of the financial functions, one can
+look for all the GitHub issues [7]_ and pull requests [8]_ that have the tag
+``component: numpy.lib.financial``.
+
+One method for measuring the effect of removing these functions is to find
+all the packages on GitHub that use them. Such a search can be performed
+with the ``python-api-inspect`` service [9]_. A search for all uses of the
+NumPy financial functions finds just eight repositories. (See the comments
+in [5]_ for the actual SQL query.)
+
+
+Implementation
+--------------
+
+* Create a new Python package, ``numpy_financial``, to be maintained in the
+ top-level NumPy github organization. This repository will contain the
+ definitions and unit tests for the financial functions. The package will
+ be added to PyPI so it can be installed with ``pip``.
+* Deprecate the financial functions in the ``numpy`` namespace, beginning in
+ NumPy version 1.18. Remove the financial functions from NumPy version 1.20.
+
+
+Backward compatibility
+----------------------
+
+The removal of these functions breaks backward compatibility, as explained
+earlier. The effects are mitigated by providing the ``numpy_financial``
+library.
+
+
+Alternatives
+------------
+
+The following alternatives were mentioned in [5]_:
+
+* *Maintain the functions as they are (i.e. do nothing).*
+ A review of the history makes clear that this is not the preference of many
+ NumPy developers. A recurring comment is that the functions simply do not
+ belong in NumPy. When that sentiment is combined with the history of bug
+ reports and the ongoing questions about the correctness of the functions, the
+ conclusion is that the cleanest solution is deprecation and removal.
+* *Move the functions from the numpy namespace to numpy.financial.*
+ This was the initial suggestion in [5]_. Such a change does not address the
+ maintenance issues, and doesn't change the misfit that many developers see
+ between these functions and NumPy. It causes disruption for the current
+ users of these functions without addressing what many developers see as the
+ fundamental problem.
+
+
+Discussion
+----------
+
+Links to past mailing list discussions, and to relevant GitHub issues and pull
+requests, have already been given. The announcement of this NEP was made on
+the NumPy-Discussion mailing list on 3 September 2019 [10]_, and on the
+PyData mailing list on 8 September 2019 [11]_. The formal proposal to accept
+the NEP was made on 19 September 2019 [12]_; a notification was also sent to
+PyData (same thread as [11]_). There have been no substantive objections.
+
+
+References and footnotes
+------------------------
+
+.. [1] Financial functions,
+ https://numpy.org/doc/1.17/reference/routines.financial.html
+
+.. [2] Numpy-discussion mailing list, "Simple financial functions for NumPy",
+ https://mail.python.org/pipermail/numpy-discussion/2008-April/032353.html
+
+.. [3] Numpy-discussion mailing list, "add xirr to numpy financial functions?",
+ https://mail.python.org/pipermail/numpy-discussion/2009-May/042645.html
+
+.. [4] Numpy-discussion mailing list, "Definitions of pv, fv, nper, pmt, and rate",
+ https://mail.python.org/pipermail/numpy-discussion/2009-June/043188.html
+
+.. [5] Get financial functions out of main namespace,
+ https://github.com/numpy/numpy/issues/2880
+
+.. [6] Numpy-discussion mailing list, "Deprecation of financial routines",
+ https://mail.python.org/pipermail/numpy-discussion/2013-August/067409.html
+
+.. [7] ``component: numpy.lib.financial`` issues,
+ https://github.com/numpy/numpy/issues?utf8=%E2%9C%93&q=is%3Aissue+label%3A%22component%3A+numpy.lib.financial%22+
+
+.. [8] ``component: numpy.lib.financial`` pull requests,
+ https://github.com/numpy/numpy/pulls?utf8=%E2%9C%93&q=is%3Apr+label%3A%22component%3A+numpy.lib.financial%22+
+
+.. [9] Quansight-Labs/python-api-inspect,
+ https://github.com/Quansight-Labs/python-api-inspect/
+
+.. [10] Numpy-discussion mailing list, "NEP 32: Remove the financial functions
+ from NumPy"
+ https://mail.python.org/pipermail/numpy-discussion/2019-September/079965.html
+
+.. [11] PyData mailing list (pydata@googlegroups.com), "NumPy proposal to
+ remove the financial functions",
+ https://mail.google.com/mail/u/0/h/1w0mjgixc4rpe/?&th=16d5c38be45f77c4&q=nep+32&v=c&s=q
+
+.. [12] Numpy-discussion mailing list, "Proposal to accept NEP 32: Remove the
+ financial functions from NumPy"
+ https://mail.python.org/pipermail/numpy-discussion/2019-September/080074.html
+
+Copyright
+---------
+
+This document has been placed in the public domain.
diff --git a/doc/neps/nep-template.rst b/doc/neps/nep-template.rst
index 2b49ec709..c3d34ea46 100644
--- a/doc/neps/nep-template.rst
+++ b/doc/neps/nep-template.rst
@@ -1,6 +1,6 @@
-=============================
-NEP Template and Instructions
-=============================
+=================================
+NEP X — Template and Instructions
+=================================
:Author: <list of authors' real names and optionally, email addresses>
:Status: <Draft | Active | Accepted | Deferred | Rejected | Withdrawn | Final | Superseded>
@@ -14,6 +14,7 @@ Abstract
The abstract should be a short description of what the NEP will achieve.
+Note that the — in the title is an em dash, not a hyphen (-).
Motivation and Scope
--------------------
diff --git a/doc/records.rst.txt b/doc/records.rst.txt
index a608880d7..3c0d55216 100644
--- a/doc/records.rst.txt
+++ b/doc/records.rst.txt
@@ -50,7 +50,7 @@ New possibilities for the "data-type"
**Dictionary (keys "names", "titles", and "formats")**
- This will be converted to a ``PyArray_VOID`` type with corresponding
+ This will be converted to a ``NPY_VOID`` type with corresponding
fields parameter (the formats list will be converted to actual
``PyArray_Descr *`` objects).
@@ -58,10 +58,10 @@ New possibilities for the "data-type"
**Objects (anything with an .itemsize and .fields attribute)**
If its an instance of (a sub-class of) void type, then a new
``PyArray_Descr*`` structure is created corresponding to its
- typeobject (and ``PyArray_VOID``) typenumber. If the type is
+ typeobject (and ``NPY_VOID``) typenumber. If the type is
registered, then the registered type-number is used.
- Otherwise a new ``PyArray_VOID PyArray_Descr*`` structure is created
+ Otherwise a new ``NPY_VOID PyArray_Descr*`` structure is created
and filled ->elsize and ->fields filled in appropriately.
The itemsize attribute must return a number > 0. The fields
diff --git a/doc/release/1.18.0-notes.rst b/doc/release/1.18.0-notes.rst
deleted file mode 100644
index f20d5e3fe..000000000
--- a/doc/release/1.18.0-notes.rst
+++ /dev/null
@@ -1,43 +0,0 @@
-==========================
-NumPy 1.18.0 Release Notes
-==========================
-
-
-Highlights
-==========
-
-
-New functions
-=============
-
-
-Deprecations
-============
-
-
-Future Changes
-==============
-
-
-Expired deprecations
-====================
-
-
-Compatibility notes
-===================
-
-
-C API changes
-=============
-
-
-New Features
-============
-
-
-Improvements
-============
-
-
-Changes
-=======
diff --git a/doc/release/time_based_proposal.rst b/doc/release/time_based_proposal.rst
deleted file mode 100644
index 2eb13562d..000000000
--- a/doc/release/time_based_proposal.rst
+++ /dev/null
@@ -1,129 +0,0 @@
-.. vim:syntax=rst
-
-Introduction
-============
-
-This document proposes some enhancements for numpy and scipy releases.
-Successive numpy and scipy releases are too far apart from a time point of
-view - some people who are in the numpy release team feel that it cannot
-improve without a bit more formal release process. The main proposal is to
-follow a time-based release, with expected dates for code freeze, beta and rc.
-The goal is two folds: make release more predictable, and move the code forward.
-
-Rationale
-=========
-
-Right now, the release process of numpy is relatively organic. When some
-features are there, we may decide to make a new release. Because there is not
-fixed schedule, people don't really know when new features and bug fixes will
-go into a release. More significantly, having an expected release schedule
-helps to *coordinate* efforts: at the beginning of a cycle, everybody can jump
-in and put new code, even break things if needed. But after some point, only
-bug fixes are accepted: this makes beta and RC releases much easier; calming
-things down toward the release date helps focusing on bugs and regressions
-
-Proposal
-========
-
-Time schedule
--------------
-
-The proposed schedule is to release numpy every 9 weeks - the exact period can
-be tweaked if it ends up not working as expected. There will be several stages
-for the cycle:
-
- * Development: anything can happen (by anything, we mean as currently
- done). The focus is on new features, refactoring, etc...
-
- * Beta: no new features. No bug fixing which requires heavy changes.
- regression fixes which appear on supported platforms and were not
- caught earlier.
-
- * Polish/RC: only docstring changes and blocker regressions are allowed.
-
-The schedule would be as follows:
-
- +------+-----------------+-----------------+------------------+
- | Week | 1.3.0 | 1.4.0 | Release time |
- +======+=================+=================+==================+
- | 1 | Development | | |
- +------+-----------------+-----------------+------------------+
- | 2 | Development | | |
- +------+-----------------+-----------------+------------------+
- | 3 | Development | | |
- +------+-----------------+-----------------+------------------+
- | 4 | Development | | |
- +------+-----------------+-----------------+------------------+
- | 5 | Development | | |
- +------+-----------------+-----------------+------------------+
- | 6 | Development | | |
- +------+-----------------+-----------------+------------------+
- | 7 | Beta | | |
- +------+-----------------+-----------------+------------------+
- | 8 | Beta | | |
- +------+-----------------+-----------------+------------------+
- | 9 | Beta | | 1.3.0 released |
- +------+-----------------+-----------------+------------------+
- | 10 | Polish | Development | |
- +------+-----------------+-----------------+------------------+
- | 11 | Polish | Development | |
- +------+-----------------+-----------------+------------------+
- | 12 | Polish | Development | |
- +------+-----------------+-----------------+------------------+
- | 13 | Polish | Development | |
- +------+-----------------+-----------------+------------------+
- | 14 | | Development | |
- +------+-----------------+-----------------+------------------+
- | 15 | | Development | |
- +------+-----------------+-----------------+------------------+
- | 16 | | Beta | |
- +------+-----------------+-----------------+------------------+
- | 17 | | Beta | |
- +------+-----------------+-----------------+------------------+
- | 18 | | Beta | 1.4.0 released |
- +------+-----------------+-----------------+------------------+
-
-Each stage can be defined as follows:
-
- +------------------+-------------+----------------+----------------+
- | | Development | Beta | Polish |
- +==================+=============+================+================+
- | Python Frozen | | slushy | Y |
- +------------------+-------------+----------------+----------------+
- | Docstring Frozen | | slushy | thicker slush |
- +------------------+-------------+----------------+----------------+
- | C code Frozen | | thicker slush | thicker slush |
- +------------------+-------------+----------------+----------------+
-
-Terminology:
-
- * slushy: you can change it if you beg the release team and it's really
- important and you coordinate with docs/translations; no "big"
- changes.
-
- * thicker slush: you can change it if it's an open bug marked
- showstopper for the Polish release, you beg the release team, the
- change is very very small yet very very important, and you feel
- extremely guilty about your transgressions.
-
-The different frozen states are intended to be gradients. The exact meaning is
-decided by the release manager: he has the last word on what's go in, what
-doesn't. The proposed schedule means that there would be at most 12 weeks
-between putting code into the source code repository and being released.
-
-Release team
-------------
-
-For every release, there would be at least one release manager. We propose to
-rotate the release manager: rotation means it is not always the same person
-doing the dirty job, and it should also keep the release manager honest.
-
-References
-==========
-
- * Proposed schedule for Gnome from Havoc Pennington (one of the core
- GTK and Gnome manager):
- https://mail.gnome.org/archives/gnome-hackers/2002-June/msg00041.html
- The proposed schedule is heavily based on this email
-
- * https://wiki.gnome.org/ReleasePlanning/Freezes
diff --git a/doc/release/upcoming_changes/10151.improvement.rst b/doc/release/upcoming_changes/10151.improvement.rst
new file mode 100644
index 000000000..3706a5132
--- /dev/null
+++ b/doc/release/upcoming_changes/10151.improvement.rst
@@ -0,0 +1,9 @@
+Different C numeric types of the same size have unique names
+------------------------------------------------------------
+On any given platform, two of ``np.intc``, ``np.int_``, and ``np.longlong``
+would previously appear indistinguishable through their ``repr``, despite
+their corresponding ``dtype`` having different properties.
+A similar problem existed for the unsigned counterparts to these types, and on
+some platforms for ``np.double`` and ``np.longdouble``.
+
+These types now always print with a unique ``__name__``.
diff --git a/doc/release/upcoming_changes/12284.new_feature.rst b/doc/release/upcoming_changes/12284.new_feature.rst
new file mode 100644
index 000000000..25321cd9b
--- /dev/null
+++ b/doc/release/upcoming_changes/12284.new_feature.rst
@@ -0,0 +1,5 @@
+
+Add our own ``*.pxd`` cython import file
+--------------------------------------------
+Added a ``numpy/__init__.pxd`` file. It will be used for ``cimport numpy``.
+
diff --git a/doc/release/upcoming_changes/13605.deprecation.rst b/doc/release/upcoming_changes/13605.deprecation.rst
new file mode 100644
index 000000000..bff12e965
--- /dev/null
+++ b/doc/release/upcoming_changes/13605.deprecation.rst
@@ -0,0 +1,9 @@
+`np.fromfile` and `np.fromstring` will error on bad data
+--------------------------------------------------------
+
+In future numpy releases, the functions `np.fromfile` and `np.fromstring`
+will throw an error when parsing bad data.
+This will now give a ``DeprecationWarning`` where previously partial or
+even invalid data was silently returned. This deprecation also affects
+the C defined functions :c:func:`PyArray_FromString` and
+:c:func:`PyArray_FromFile`.
diff --git a/doc/release/upcoming_changes/13610.improvement.rst b/doc/release/upcoming_changes/13610.improvement.rst
new file mode 100644
index 000000000..6f97b43ad
--- /dev/null
+++ b/doc/release/upcoming_changes/13610.improvement.rst
@@ -0,0 +1,5 @@
+``argwhere`` now produces a consistent result on 0d arrays
+----------------------------------------------------------
+On N-d arrays, `numpy.argwhere` now always produces an array of shape
+``(n_non_zero, arr.ndim)``, even when ``arr.ndim == 0``. Previously, the
+last axis would have a dimension of 1 in this case.
diff --git a/doc/release/upcoming_changes/13899.change.rst b/doc/release/upcoming_changes/13899.change.rst
new file mode 100644
index 000000000..da8277347
--- /dev/null
+++ b/doc/release/upcoming_changes/13899.change.rst
@@ -0,0 +1,4 @@
+Incorrect ``threshold`` in ``np.set_printoptions`` raises ``TypeError`` or ``ValueError``
+-----------------------------------------------------------------------------------------
+Previously an incorrect ``threshold`` raised ``ValueError``; it now raises ``TypeError``
+for non-numeric types and ``ValueError`` for ``nan`` values.
diff --git a/doc/release/upcoming_changes/14036.deprecation.rst b/doc/release/upcoming_changes/14036.deprecation.rst
new file mode 100644
index 000000000..3d997b9a2
--- /dev/null
+++ b/doc/release/upcoming_changes/14036.deprecation.rst
@@ -0,0 +1,4 @@
+Deprecate `PyArray_As1D`, `PyArray_As2D`
+----------------------------------------
+`PyArray_As1D`, `PyArray_As2D` are deprecated, use
+`PyArray_AsCArray` instead \ No newline at end of file
diff --git a/doc/release/upcoming_changes/14036.expired.rst b/doc/release/upcoming_changes/14036.expired.rst
new file mode 100644
index 000000000..05164aa38
--- /dev/null
+++ b/doc/release/upcoming_changes/14036.expired.rst
@@ -0,0 +1,2 @@
+* ``PyArray_As1D`` and ``PyArray_As2D`` have been removed in favor of
+ ``PyArray_AsCArray``
diff --git a/doc/release/upcoming_changes/14039.expired.rst b/doc/release/upcoming_changes/14039.expired.rst
new file mode 100644
index 000000000..effee0626
--- /dev/null
+++ b/doc/release/upcoming_changes/14039.expired.rst
@@ -0,0 +1,2 @@
+* ``np.rank`` has been removed. This was deprecated in NumPy 1.10
+ and has been replaced by ``np.ndim``.
diff --git a/doc/release/upcoming_changes/14100.expired.rst b/doc/release/upcoming_changes/14100.expired.rst
new file mode 100644
index 000000000..e9ea9eeb4
--- /dev/null
+++ b/doc/release/upcoming_changes/14100.expired.rst
@@ -0,0 +1,3 @@
+* ``PyArray_FromDimsAndDataAndDescr`` and ``PyArray_FromDims`` have been
+ removed (they will always raise an error). Use ``PyArray_NewFromDescr``
+ and ``PyArray_SimpleNew`` instead.
diff --git a/doc/release/upcoming_changes/14181.deprecation.rst b/doc/release/upcoming_changes/14181.deprecation.rst
new file mode 100644
index 000000000..9979b2246
--- /dev/null
+++ b/doc/release/upcoming_changes/14181.deprecation.rst
@@ -0,0 +1,3 @@
+Deprecate `np.alen`
+-------------------
+`np.alen` was deprecated. Use `len` instead.
diff --git a/doc/release/upcoming_changes/14248.change.rst b/doc/release/upcoming_changes/14248.change.rst
new file mode 100644
index 000000000..9ae0f16bc
--- /dev/null
+++ b/doc/release/upcoming_changes/14248.change.rst
@@ -0,0 +1,10 @@
+`numpy.distutils`: append behavior changed for LDFLAGS and similar
+------------------------------------------------------------------
+`numpy.distutils` has always overridden rather than appended to ``LDFLAGS`` and
+other similar environment variables for compiling Fortran extensions. Now
+the default behavior has changed to appending - which is the expected behavior
+in most situations. To preserve the old (overwriting) behavior, set the
+``NPY_DISTUTILS_APPEND_FLAGS`` environment variable to 0. This applies to:
+``LDFLAGS``, ``F77FLAGS``, ``F90FLAGS``, ``FREEFLAGS``, ``FOPT``, ``FDEBUG``,
+and ``FFLAGS``. NumPy 1.16 and 1.17 gave build warnings in situations where this
+change in behavior would have affected the compile flags used.
diff --git a/doc/release/upcoming_changes/14255.improvement.rst b/doc/release/upcoming_changes/14255.improvement.rst
new file mode 100644
index 000000000..e17835efd
--- /dev/null
+++ b/doc/release/upcoming_changes/14255.improvement.rst
@@ -0,0 +1,4 @@
+`numpy.unique` has consistent axes order (except the chosen one) when ``axis`` is not None
+------------------------------------------------------------------------------------------
+Using ``moveaxis`` instead of ``swapaxes`` in `numpy.unique`, so that the ordering of axes
+except the axis in arguments will not be broken.
diff --git a/doc/release/upcoming_changes/14256.expired.rst b/doc/release/upcoming_changes/14256.expired.rst
new file mode 100644
index 000000000..229514171
--- /dev/null
+++ b/doc/release/upcoming_changes/14256.expired.rst
@@ -0,0 +1,3 @@
+* ``numeric.loads``, ``numeric.load``, ``np.ma.dump``,
+ ``np.ma.dumps``, ``np.ma.load``, ``np.ma.loads`` are removed,
+ use ``pickle`` methods instead \ No newline at end of file
diff --git a/doc/release/upcoming_changes/14259.expired.rst b/doc/release/upcoming_changes/14259.expired.rst
new file mode 100644
index 000000000..fee44419b
--- /dev/null
+++ b/doc/release/upcoming_changes/14259.expired.rst
@@ -0,0 +1,6 @@
+* ``arrayprint.FloatFormat``, ``arrayprint.LongFloatFormat`` has been removed,
+ use ``FloatingFormat`` instead
+* ``arrayprint.ComplexFormat``, ``arrayprint.LongComplexFormat`` has been
+ removed, use ``ComplexFloatingFormat`` instead
+* ``arrayprint.StructureFormat`` has been removed, use ``StructureVoidFormat``
+ instead \ No newline at end of file
diff --git a/doc/release/upcoming_changes/14325.expired.rst b/doc/release/upcoming_changes/14325.expired.rst
new file mode 100644
index 000000000..348b3d524
--- /dev/null
+++ b/doc/release/upcoming_changes/14325.expired.rst
@@ -0,0 +1,2 @@
+* ``np.testing.rand`` has been removed. This was deprecated in NumPy 1.11
+ and has been replaced by ``np.random.rand``.
diff --git a/doc/release/upcoming_changes/14335.expired.rst b/doc/release/upcoming_changes/14335.expired.rst
new file mode 100644
index 000000000..53598cea1
--- /dev/null
+++ b/doc/release/upcoming_changes/14335.expired.rst
@@ -0,0 +1,2 @@
+* Class ``SafeEval`` in ``numpy/lib/utils.py`` has been removed. This was deprecated in NumPy 1.10.
+ Use ``np.safe_eval`` instead. \ No newline at end of file
diff --git a/doc/release/upcoming_changes/14393.c_api.rst b/doc/release/upcoming_changes/14393.c_api.rst
new file mode 100644
index 000000000..0afd27584
--- /dev/null
+++ b/doc/release/upcoming_changes/14393.c_api.rst
@@ -0,0 +1,5 @@
+PyDataType_ISUNSIZED(descr) now returns False for structured datatypes
+----------------------------------------------------------------------
+Previously this returned True for any datatype of itemsize 0, but now this
+returns False for the non-flexible datatype with itemsize 0, ``np.dtype([])``.
+
diff --git a/doc/release/upcoming_changes/14464.improvement.rst b/doc/release/upcoming_changes/14464.improvement.rst
new file mode 100644
index 000000000..36ee4090b
--- /dev/null
+++ b/doc/release/upcoming_changes/14464.improvement.rst
@@ -0,0 +1,6 @@
+`numpy.matmul` with boolean output now converts to boolean values
+-----------------------------------------------------------------
+Calling `numpy.matmul` where the output is a boolean array would fill the array
+with uint8 equivalents of the result, rather than 0/1. Now it forces the output
+to 0 or 1 (``NPY_TRUE`` or ``NPY_FALSE``).
+
diff --git a/doc/release/upcoming_changes/14498.change.rst b/doc/release/upcoming_changes/14498.change.rst
new file mode 100644
index 000000000..fd784e289
--- /dev/null
+++ b/doc/release/upcoming_changes/14498.change.rst
@@ -0,0 +1,7 @@
+Remove ``numpy.random.entropy`` without a deprecation
+-----------------------------------------------------
+
+``numpy.random.entropy`` was added to the `numpy.random` namespace in 1.17.0.
+It was meant to be a private c-extension module, but was exposed as public.
+It has been replaced by `numpy.random.SeedSequence` so the module was
+completely removed.
diff --git a/doc/release/upcoming_changes/14501.improvement.rst b/doc/release/upcoming_changes/14501.improvement.rst
new file mode 100644
index 000000000..f397ecccf
--- /dev/null
+++ b/doc/release/upcoming_changes/14501.improvement.rst
@@ -0,0 +1,6 @@
+`numpy.random.randint` produced incorrect value when the range was ``2**32``
+----------------------------------------------------------------------------
+The implementation introduced in 1.17.0 had an incorrect check when
+determining whether to use the 32-bit path or the full 64-bit
+path that incorrectly redirected random integer generation with a ``high - low``
+range of ``2**32`` to the 64-bit generator.
diff --git a/doc/release/upcoming_changes/14510.compatibility.rst b/doc/release/upcoming_changes/14510.compatibility.rst
new file mode 100644
index 000000000..63d46d2f7
--- /dev/null
+++ b/doc/release/upcoming_changes/14510.compatibility.rst
@@ -0,0 +1,12 @@
+`numpy.lib.recfunctions.drop_fields` can no longer return `None`
+----------------------------------------------------------------
+If ``drop_fields`` is used to drop all fields, previously the array would
+be completely discarded and `None` returned. Now it returns an array of the
+same shape as the input, but with no fields. The old behavior can be retained
+with::
+
+ dropped_arr = drop_fields(arr, ['a', 'b'])
+ if dropped_arr.dtype.names == ():
+ dropped_arr = None
+
+converting the empty recarray to `None`
diff --git a/doc/release/upcoming_changes/14518.change.rst b/doc/release/upcoming_changes/14518.change.rst
new file mode 100644
index 000000000..f7b782825
--- /dev/null
+++ b/doc/release/upcoming_changes/14518.change.rst
@@ -0,0 +1,18 @@
+Add options to quiet build configuration and build with ``-Werror``
+-------------------------------------------------------------------
+Added two new configuration options. During the ``build_src`` subcommand, as
+part of configuring NumPy, the files ``_numpyconfig.h`` and ``config.h`` are
+created by probing support for various runtime functions and routines.
+Previously, the very verbose compiler output during this stage clouded more
+important information. By default the output is silenced. Running ``runtests.py
+--debug-configure`` will add ``-v`` to the ``build_src`` subcommand, which
+will restore the previous behaviour.
+
+Adding ``CFLAGS=-Werror`` to turn warnings into errors would trigger errors
+during the configuration. Now ``runtests.py --warn-error`` will add
+``--warn-error`` to the ``build`` subcommand, which will percolate to the
+``build_ext`` and ``build_lib`` subcommands. This will add the compiler flag
+to those stages and turn compiler warnings into errors while actually building
+NumPy itself, avoiding the ``build_src`` subcommand compiler calls.
+
+(`gh-14527 <https://github.com/numpy/numpy/pull/14527>`__)
diff --git a/doc/release/upcoming_changes/14567.expired.rst b/doc/release/upcoming_changes/14567.expired.rst
new file mode 100644
index 000000000..59cb600fb
--- /dev/null
+++ b/doc/release/upcoming_changes/14567.expired.rst
@@ -0,0 +1,5 @@
+The files ``numpy/testing/decorators.py``, ``numpy/testing/noseclasses.py``
+and ``numpy/testing/nosetester.py`` have been removed. They were never
+meant to be public (all relevant objects are present in the
+``numpy.testing`` namespace), and importing them has given a deprecation
+warning since NumPy 1.15.0.
diff --git a/doc/release/upcoming_changes/14583.expired.rst b/doc/release/upcoming_changes/14583.expired.rst
new file mode 100644
index 000000000..1fad06309
--- /dev/null
+++ b/doc/release/upcoming_changes/14583.expired.rst
@@ -0,0 +1,2 @@
+* Remove deprecated support for boolean and empty condition lists in
+ `numpy.select`
diff --git a/doc/release/upcoming_changes/README.rst b/doc/release/upcoming_changes/README.rst
new file mode 100644
index 000000000..7f6476bda
--- /dev/null
+++ b/doc/release/upcoming_changes/README.rst
@@ -0,0 +1,55 @@
+:orphan:
+
+Changelog
+=========
+
+This directory contains "news fragments" which are short files that contain a
+small **ReST**-formatted text that will be added to the next what's new page.
+
+Make sure to use full sentences with correct case and punctuation, and please
+try to use Sphinx intersphinx using backticks. The fragment should have a
+header line and an underline using ``------``
+
+Each file should be named like ``<PULL REQUEST>.<TYPE>.rst``, where
+``<PULL REQUEST>`` is a pull request number, and ``<TYPE>`` is one of:
+
+* ``new_function``: New user facing functions.
+* ``deprecation``: Changes existing code to emit a DeprecationWarning.
+* ``future``: Changes existing code to emit a FutureWarning.
+* ``expired``: Removal of a deprecated part of the API.
+* ``compatibility``: A change which requires users to change code and is not
+ backwards compatible. (Not to be used for removal of deprecated features.)
+* ``c_api``: Changes in the Numpy C-API exported functions
+* ``new_feature``: New user facing features like ``kwargs``.
+* ``improvement``: Performance and edge-case changes
+* ``change``: Other changes
+* ``highlight``: Adds a highlight bullet point to use as a possible highlight
+  of the release.
+
+Most categories should be formatted as paragraphs with a heading.
+So for example: ``123.new_feature.rst`` would have the content::
+
+ ``my_new_feature`` option for `my_favorite_function`
+ ----------------------------------------------------
+ The ``my_new_feature`` option is now available for `my_favorite_function`.
+ To use it, write ``np.my_favorite_function(..., my_new_feature=True)``.
+
+``highlight`` is usually formatted as bullet points making the fragment
+``* This is a highlight``.
+
+Note the use of single-backticks to get an internal link (assuming
+``my_favorite_function`` is exported from the ``numpy`` namespace),
+and double-backticks for code.
+
+If you are unsure what pull request type to use, don't hesitate to ask in your
+PR.
+
+You can install ``towncrier`` and run ``towncrier --draft --version 1.18``
+if you want to get a preview of how your change will look in the final release
+notes.
+
+.. note::
+
+ This README was adapted from the pytest changelog readme under the terms of
+ the MIT licence.
+
diff --git a/doc/release/upcoming_changes/template.rst b/doc/release/upcoming_changes/template.rst
new file mode 100644
index 000000000..9c8a3b5fc
--- /dev/null
+++ b/doc/release/upcoming_changes/template.rst
@@ -0,0 +1,38 @@
+{% set title = "NumPy {} Release Notes".format(versiondata.version) %}
+{{ "=" * title|length }}
+{{ title }}
+{{ "=" * title|length }}
+
+{% for section, _ in sections.items() %}
+{% set underline = underlines[0] %}{% if section %}{{ section }}
+{{ underline * section|length }}{% set underline = underlines[1] %}
+
+{% endif %}
+{% if sections[section] %}
+{% for category, val in definitions.items() if category in sections[section] %}
+
+{{ definitions[category]['name'] }}
+{{ underline * definitions[category]['name']|length }}
+
+{% if definitions[category]['showcontent'] %}
+{% for text, values in sections[section][category].items() %}
+{{ text }}
+{{ get_indent(text) }}({{values|join(', ') }})
+
+{% endfor %}
+{% else %}
+- {{ sections[section][category]['']|join(', ') }}
+
+{% endif %}
+{% if sections[section][category]|length == 0 %}
+No significant changes.
+
+{% else %}
+{% endif %}
+{% endfor %}
+{% else %}
+No significant changes.
+
+
+{% endif %}
+{% endfor %}
diff --git a/doc/source/_static/numpy_logo.png b/doc/source/_static/numpy_logo.png
new file mode 100644
index 000000000..af8cbe323
--- /dev/null
+++ b/doc/source/_static/numpy_logo.png
Binary files differ
diff --git a/doc/source/_templates/autosummary/base.rst b/doc/source/_templates/autosummary/base.rst
new file mode 100644
index 000000000..0331154a7
--- /dev/null
+++ b/doc/source/_templates/autosummary/base.rst
@@ -0,0 +1,14 @@
+{% if objtype == 'property' %}
+:orphan:
+{% endif %}
+
+{{ fullname | escape | underline}}
+
+.. currentmodule:: {{ module }}
+
+{% if objtype == 'property' %}
+property
+{% endif %}
+
+.. auto{{ objtype }}:: {{ objname }}
+
diff --git a/doc/source/_templates/indexsidebar.html b/doc/source/_templates/indexsidebar.html
index 51e7c4308..4707fc0e8 100644
--- a/doc/source/_templates/indexsidebar.html
+++ b/doc/source/_templates/indexsidebar.html
@@ -1,4 +1,5 @@
<h3>Resources</h3>
<ul>
+ <li><a href="https://numpy.org/">NumPy.org website</a></li>
<li><a href="https://scipy.org/">Scipy.org website</a></li>
</ul>
diff --git a/doc/source/_templates/layout.html b/doc/source/_templates/layout.html
index 77da54a00..beaa297db 100644
--- a/doc/source/_templates/layout.html
+++ b/doc/source/_templates/layout.html
@@ -1,5 +1,15 @@
{% extends "!layout.html" %}
+{%- block header %}
+<div class="container">
+ <div class="top-scipy-org-logo-header" style="background-color: #a2bae8;">
+ <a href="{{ pathto('index') }}">
+ <img border=0 alt="NumPy" src="{{ pathto('_static/numpy_logo.png', 1) }}"></a>
+ </div>
+ </div>
+</div>
+
+{% endblock %}
{% block rootrellink %}
{% if pagename != 'index' %}
<li class="active"><a href="{{ pathto('index') }}">{{ shorttitle|e }}</a></li>
diff --git a/doc/source/conf.py b/doc/source/conf.py
index fa0c0e7e4..83cecc917 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -3,12 +3,8 @@ from __future__ import division, absolute_import, print_function
import sys, os, re
-# Check Sphinx version
-import sphinx
-if sphinx.__version__ < "1.2.1":
- raise RuntimeError("Sphinx 1.2.1 or newer required")
-
-needs_sphinx = '1.0'
+# Minimum version, enforced by sphinx
+needs_sphinx = '2.2.0'
# -----------------------------------------------------------------------------
# General configuration
@@ -31,13 +27,10 @@ extensions = [
'matplotlib.sphinxext.plot_directive',
'IPython.sphinxext.ipython_console_highlighting',
'IPython.sphinxext.ipython_directive',
+ 'sphinx.ext.imgmath',
]
-if sphinx.__version__ >= "1.4":
- extensions.append('sphinx.ext.imgmath')
- imgmath_image_format = 'svg'
-else:
- extensions.append('sphinx.ext.pngmath')
+imgmath_image_format = 'svg'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
@@ -45,6 +38,8 @@ templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
+master_doc = 'contents'
+
# General substitutions.
project = 'NumPy'
copyright = '2008-2019, The SciPy community'
@@ -93,6 +88,7 @@ pygments_style = 'sphinx'
def setup(app):
# add a config value for `ifconfig` directives
app.add_config_value('python_version_major', str(sys.version_info.major), 'env')
+ app.add_lexer('NumPyC', NumPyLexer(stripnl=False))
# -----------------------------------------------------------------------------
# HTML output
@@ -121,7 +117,9 @@ else:
"edit_link": False,
"sidebar": "left",
"scipy_org_logo": False,
- "rootlinks": []
+ "rootlinks": [("https://numpy.org/", "NumPy.org"),
+ ("https://numpy.org/doc", "Docs"),
+ ]
}
html_sidebars = {'index': ['indexsidebar.html', 'searchbox.html']}
@@ -175,6 +173,10 @@ latex_documents = [
# not chapters.
#latex_use_parts = False
+latex_elements = {
+ 'fontenc': r'\usepackage[LGR,T1]{fontenc}'
+}
+
# Additional stuff for the LaTeX preamble.
latex_preamble = r'''
\usepackage{amsmath}
@@ -366,18 +368,15 @@ def linkcode_resolve(domain, info):
from pygments.lexers import CLexer
from pygments import token
-from sphinx.highlighting import lexers
import copy
class NumPyLexer(CLexer):
name = 'NUMPYLEXER'
- tokens = copy.deepcopy(lexers['c'].tokens)
+ tokens = copy.deepcopy(CLexer.tokens)
# Extend the regex for valid identifiers with @
for k, val in tokens.items():
for i, v in enumerate(val):
if isinstance(v, tuple):
if isinstance(v[0], str):
val[i] = (v[0].replace('a-zA-Z', 'a-zA-Z@'),) + v[1:]
-
-lexers['NumPyC'] = NumPyLexer(stripnl=False)
diff --git a/doc/source/dev/development_environment.rst b/doc/source/dev/development_environment.rst
index 1d119ebce..9d618cc9f 100644
--- a/doc/source/dev/development_environment.rst
+++ b/doc/source/dev/development_environment.rst
@@ -19,6 +19,11 @@ sources needs some additional steps, which are explained below. For the rest
of this chapter we assume that you have set up your git repo as described in
:ref:`using-git`.
+.. _testing-builds:
+
+Testing builds
+--------------
+
To build the development version of NumPy and run tests, spawn
interactive shells with the Python import paths properly set up etc.,
do one of::
@@ -47,6 +52,10 @@ When using pytest as a target (the default), you can
$ python runtests.py -v -t numpy/core/tests/test_multiarray.py -- -k "MatMul and not vector"
+.. note::
+
+ Remember that all tests of NumPy should pass before committing your changes.
+
Using ``runtests.py`` is the recommended approach to running tests.
There are also a number of alternatives to it, for example in-place
build or installing to a virtualenv. See the FAQ below for details.
@@ -87,19 +96,31 @@ installs a ``.egg-link`` file into your site-packages as well as adjusts the
Other build options
-------------------
+Build options can be discovered by running any of::
+
+ $ python setup.py --help
+ $ python setup.py --help-commands
+
It's possible to do a parallel build with ``numpy.distutils`` with the ``-j`` option;
see :ref:`parallel-builds` for more details.
-In order to install the development version of NumPy in ``site-packages``, use
-``python setup.py install --user``.
-
A similar approach to in-place builds and use of ``PYTHONPATH`` but outside the
source tree is to use::
- $ python setup.py install --prefix /some/owned/folder
+ $ pip install . --prefix /some/owned/folder
$ export PYTHONPATH=/some/owned/folder/lib/python3.4/site-packages
+NumPy uses a series of tests to probe the compiler and libc libraries for
+functions. The results are stored in ``_numpyconfig.h`` and ``config.h`` files
+using ``HAVE_XXX`` definitions. These tests are run during the ``build_src``
+phase of the ``_multiarray_umath`` module in the ``generate_config_h`` and
+``generate_numpyconfig_h`` functions. Since the output of these calls includes
+many compiler warnings and errors, by default it is run quietly. If you wish
+to see this output, you can run the ``build_src`` stage verbosely::
+
+ $ python setup.py build_src -v
+
Using virtualenvs
-----------------
diff --git a/doc/source/dev/development_workflow.rst b/doc/source/dev/development_workflow.rst
index 200d95b92..900431374 100644
--- a/doc/source/dev/development_workflow.rst
+++ b/doc/source/dev/development_workflow.rst
@@ -28,7 +28,7 @@ In short:
- *Core developers* If you want to push changes without
further review, see the notes :ref:`below <pushing-to-main>`.
-
+
This way of working helps to keep work well organized and the history
as clear as possible.
@@ -69,7 +69,7 @@ Overview
git status # Optional
git diff # Optional
git add modified_file
- git commit
+ git commit
# push the branch to your own Github repo
git push origin my-new-feature
@@ -112,38 +112,38 @@ In more detail
properly formatted and sufficiently detailed commit message. After saving
your message and closing the editor, your commit will be saved. For trivial
commits, a short commit message can be passed in through the command line
- using the ``-m`` flag. For example, ``git commit -am "ENH: Some message"``.
-
+ using the ``-m`` flag. For example, ``git commit -am "ENH: Some message"``.
+
In some cases, you will see this form of the commit command: ``git commit
-a``. The extra ``-a`` flag automatically commits all modified files and
removes all deleted files. This can save you some typing of numerous ``git
add`` commands; however, it can add unwanted changes to a commit if you're
not careful. For more information, see `why the -a flag?`_ - and the
- helpful use-case description in the `tangled working copy problem`_.
+ helpful use-case description in the `tangled working copy problem`_.
#. Push the changes to your forked repo on github_::
git push origin my-new-feature
For more information, see `git push`_.
-
+
.. note::
-
+
Assuming you have followed the instructions in these pages, git will create
a default link to your github_ repo called ``origin``. In git >= 1.7 you
can ensure that the link to origin is permanently set by using the
``--set-upstream`` option::
-
+
git push --set-upstream origin my-new-feature
-
+
From now on git_ will know that ``my-new-feature`` is related to the
``my-new-feature`` branch in your own github_ repo. Subsequent push calls
are then simplified to the following::
git push
-
+
You have to use ``--set-upstream`` for each new branch that you create.
-
+
It may be the case that while you were working on your edits, new commits have
been added to ``upstream`` that affect your work. In this case, follow the
@@ -194,12 +194,18 @@ Asking for your changes to be merged with the main repo
=======================================================
When you feel your work is finished, you can create a pull request (PR). Github
-has a nice help page that outlines the process for `filing pull requests`_.
+has a nice help page that outlines the process for `filing pull requests`_.
If your changes involve modifications to the API or addition/modification of a
-function, you should initiate a code review. This involves sending an email to
-the `NumPy mailing list`_ with a link to your PR along with a description of
-and a motivation for your changes.
+function, you should
+
+- send an email to the `NumPy mailing list`_ with a link to your PR along with
+ a description of and a motivation for your changes. This may generate
+ changes and feedback. It might be prudent to start with this step if your
+ change may be controversial.
+- add a release note to the ``doc/release/upcoming_changes/`` directory,
+ following the instructions and format in the
+ ``doc/release/upcoming_changes/README.rst`` file.
.. _rebasing-on-master:
@@ -500,7 +506,7 @@ them to ``upstream`` as follows:
git push upstream my-feature-branch:master
-.. note::
+.. note::
It's usually a good idea to use the ``-n`` flag to ``git push`` to check
first that you're about to push the changes you want to the place you
diff --git a/doc/source/dev/governance/people.rst b/doc/source/dev/governance/people.rst
index 40347f9bf..10af7f221 100644
--- a/doc/source/dev/governance/people.rst
+++ b/doc/source/dev/governance/people.rst
@@ -48,7 +48,7 @@ NumFOCUS Subcommittee
* Jaime Fernández del Río
-* Nathaniel Smith
+* Sebastian Berg
* External member: Thomas Caswell
diff --git a/doc/source/dev/index.rst b/doc/source/dev/index.rst
index a8bd0bb46..3b409f5ca 100644
--- a/doc/source/dev/index.rst
+++ b/doc/source/dev/index.rst
@@ -2,6 +2,33 @@
Contributing to NumPy
#####################
+Not a coder? Not a problem! NumPy is multi-faceted, and we can use a lot of help.
+These are all activities we'd like to get help with (they're all important, so
+we list them in alphabetical order):
+
+- Code maintenance and development
+- Community coordination
+- DevOps
+- Developing educational content & narrative documentation
+- Writing technical documentation
+- Fundraising
+- Project management
+- Marketing
+- Translating content
+- Website design and development
+
+The rest of this document discusses working on the NumPy code base and documentation.
+We're in the process of updating our descriptions of other activities and roles.
+If you are interested in these other activities, please contact us!
+You can do this via
+the `numpy-discussion mailing list <https://scipy.org/scipylib/mailing-lists.html>`__,
+or on GitHub (open an issue or comment on a relevant issue). These are our preferred
+communication channels (open source is open by nature!), however if you prefer
+to discuss in private first, please reach out to our community coordinators
+at `numpy-team@googlegroups.com` or `numpy-team.slack.com` (send an email to
+`numpy-team@googlegroups.com` for an invite the first time).
+
+
Development process - summary
=============================
diff --git a/doc/source/docs/howto_build_docs.rst b/doc/source/docs/howto_build_docs.rst
index 4bb7628c1..6deacda5c 100644
--- a/doc/source/docs/howto_build_docs.rst
+++ b/doc/source/docs/howto_build_docs.rst
@@ -5,7 +5,7 @@ Building the NumPy API and reference docs
=========================================
We currently use Sphinx_ for generating the API and reference
-documentation for NumPy. You will need Sphinx 1.8.3 or newer.
+documentation for NumPy. You will need Sphinx >= 1.8.3 and <= 1.8.5.
If you only want to get the documentation, note that pre-built
versions can be found at
diff --git a/doc/source/reference/arrays.classes.rst b/doc/source/reference/arrays.classes.rst
index a91215476..39410b2a4 100644
--- a/doc/source/reference/arrays.classes.rst
+++ b/doc/source/reference/arrays.classes.rst
@@ -82,7 +82,7 @@ NumPy provides several hooks that classes can customize:
:func:`~numpy.matmul`, which currently is not a Ufunc, but could be
relatively easily be rewritten as a (set of) generalized Ufuncs. The
same may happen with functions such as :func:`~numpy.median`,
- :func:`~numpy.min`, and :func:`~numpy.argsort`.
+ :func:`~numpy.amin`, and :func:`~numpy.argsort`.
Like with some other special methods in python, such as ``__hash__`` and
``__iter__``, it is possible to indicate that your class does *not*
diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst
index 387515f59..2225eedb3 100644
--- a/doc/source/reference/arrays.datetime.rst
+++ b/doc/source/reference/arrays.datetime.rst
@@ -26,7 +26,9 @@ be either a :ref:`date unit <arrays.dtypes.dateunits>` or a
:ref:`time unit <arrays.dtypes.timeunits>`. The date units are years ('Y'),
months ('M'), weeks ('W'), and days ('D'), while the time units are
hours ('h'), minutes ('m'), seconds ('s'), milliseconds ('ms'), and
-some additional SI-prefix seconds-based units.
+some additional SI-prefix seconds-based units. The datetime64 data type
+also accepts the string "NAT", in any combination of lowercase/uppercase
+letters, for a "Not A Time" value.
.. admonition:: Example
@@ -50,6 +52,11 @@ some additional SI-prefix seconds-based units.
>>> np.datetime64('2005-02-25T03:30')
numpy.datetime64('2005-02-25T03:30')
+ NAT (not a time):
+
+ >>> np.datetime64('nat')
+ numpy.datetime64('NaT')
+
When creating an array of datetimes from a string, it is still possible
to automatically select the unit from the inputs, by using the
datetime type with generic units.
@@ -100,7 +107,21 @@ Datetime and Timedelta Arithmetic
NumPy allows the subtraction of two Datetime values, an operation which
produces a number with a time unit. Because NumPy doesn't have a physical
quantities system in its core, the timedelta64 data type was created
-to complement datetime64.
+to complement datetime64. The arguments for timedelta64 are a number,
+to represent the number of units, and a date/time unit, such as
+(D)ay, (M)onth, (Y)ear, (h)ours, (m)inutes, or (s)econds. The timedelta64
+data type also accepts the string "NAT" in place of the number for a "Not A Time" value.
+
+.. admonition:: Example
+
+ >>> np.timedelta64(1, 'D')
+ numpy.timedelta64(1,'D')
+
+ >>> np.timedelta64(4, 'h')
+ numpy.timedelta64(4,'h')
+
+ >>> np.timedelta64('nAt')
+ numpy.timedelta64('NaT')
Datetimes and Timedeltas work together to provide ways for
simple datetime calculations.
@@ -122,6 +143,12 @@ simple datetime calculations.
>>> np.timedelta64(1,'W') % np.timedelta64(10,'D')
numpy.timedelta64(7,'D')
+ >>> np.datetime64('nat') - np.datetime64('2009-01-01')
+ numpy.timedelta64('NaT','D')
+
+ >>> np.datetime64('2009-01-01') + np.timedelta64('nat')
+ numpy.datetime64('NaT')
+
There are two Timedelta units ('Y', years and 'M', months) which are treated
specially, because how much time they represent changes depending
on when they are used. While a timedelta day unit is equivalent to
@@ -366,132 +393,4 @@ As a corollary to this change, we no longer prohibit casting between datetimes
with date units and datetimes with timeunits. With timezone naive datetimes,
the rule for casting from dates to times is no longer ambiguous.
-.. _pandas: http://pandas.pydata.org
-
-
-Differences Between 1.6 and 1.7 Datetimes
-=========================================
-
-The NumPy 1.6 release includes a more primitive datetime data type
-than 1.7. This section documents many of the changes that have taken
-place.
-
-String Parsing
-``````````````
-
-The datetime string parser in NumPy 1.6 is very liberal in what it accepts,
-and silently allows invalid input without raising errors. The parser in
-NumPy 1.7 is quite strict about only accepting ISO 8601 dates, with a few
-convenience extensions. 1.6 always creates microsecond (us) units by
-default, whereas 1.7 detects a unit based on the format of the string.
-Here is a comparison.::
-
- # NumPy 1.6.1
- >>> np.datetime64('1979-03-22')
- 1979-03-22 00:00:00
- # NumPy 1.7.0
- >>> np.datetime64('1979-03-22')
- numpy.datetime64('1979-03-22')
-
- # NumPy 1.6.1, unit default microseconds
- >>> np.datetime64('1979-03-22').dtype
- dtype('datetime64[us]')
- # NumPy 1.7.0, unit of days detected from string
- >>> np.datetime64('1979-03-22').dtype
- dtype('<M8[D]')
-
- # NumPy 1.6.1, ignores invalid part of string
- >>> np.datetime64('1979-03-2corruptedstring')
- 1979-03-02 00:00:00
- # NumPy 1.7.0, raises error for invalid input
- >>> np.datetime64('1979-03-2corruptedstring')
- Traceback (most recent call last):
- File "<stdin>", line 1, in <module>
- ValueError: Error parsing datetime string "1979-03-2corruptedstring" at position 8
-
- # NumPy 1.6.1, 'nat' produces today's date
- >>> np.datetime64('nat')
- 2012-04-30 00:00:00
- # NumPy 1.7.0, 'nat' produces not-a-time
- >>> np.datetime64('nat')
- numpy.datetime64('NaT')
-
- # NumPy 1.6.1, 'garbage' produces today's date
- >>> np.datetime64('garbage')
- 2012-04-30 00:00:00
- # NumPy 1.7.0, 'garbage' raises an exception
- >>> np.datetime64('garbage')
- Traceback (most recent call last):
- File "<stdin>", line 1, in <module>
- ValueError: Error parsing datetime string "garbage" at position 0
-
- # NumPy 1.6.1, can't specify unit in scalar constructor
- >>> np.datetime64('1979-03-22T19:00', 'h')
- Traceback (most recent call last):
- File "<stdin>", line 1, in <module>
- TypeError: function takes at most 1 argument (2 given)
- # NumPy 1.7.0, unit in scalar constructor
- >>> np.datetime64('1979-03-22T19:00', 'h')
- numpy.datetime64('1979-03-22T19:00-0500','h')
-
- # NumPy 1.6.1, reads ISO 8601 strings w/o TZ as UTC
- >>> np.array(['1979-03-22T19:00'], dtype='M8[h]')
- array([1979-03-22 19:00:00], dtype=datetime64[h])
- # NumPy 1.7.0, reads ISO 8601 strings w/o TZ as local (ISO specifies this)
- >>> np.array(['1979-03-22T19:00'], dtype='M8[h]')
- array(['1979-03-22T19-0500'], dtype='datetime64[h]')
-
- # NumPy 1.6.1, doesn't parse all ISO 8601 strings correctly
- >>> np.array(['1979-03-22T12'], dtype='M8[h]')
- array([1979-03-22 00:00:00], dtype=datetime64[h])
- >>> np.array(['1979-03-22T12:00'], dtype='M8[h]')
- array([1979-03-22 12:00:00], dtype=datetime64[h])
- # NumPy 1.7.0, handles this case correctly
- >>> np.array(['1979-03-22T12'], dtype='M8[h]')
- array(['1979-03-22T12-0500'], dtype='datetime64[h]')
- >>> np.array(['1979-03-22T12:00'], dtype='M8[h]')
- array(['1979-03-22T12-0500'], dtype='datetime64[h]')
-
-Unit Conversion
-```````````````
-
-The 1.6 implementation of datetime does not convert between units correctly.::
-
- # NumPy 1.6.1, the representation value is untouched
- >>> np.array(['1979-03-22'], dtype='M8[D]')
- array([1979-03-22 00:00:00], dtype=datetime64[D])
- >>> np.array(['1979-03-22'], dtype='M8[D]').astype('M8[M]')
- array([2250-08-01 00:00:00], dtype=datetime64[M])
- # NumPy 1.7.0, the representation is scaled accordingly
- >>> np.array(['1979-03-22'], dtype='M8[D]')
- array(['1979-03-22'], dtype='datetime64[D]')
- >>> np.array(['1979-03-22'], dtype='M8[D]').astype('M8[M]')
- array(['1979-03'], dtype='datetime64[M]')
-
-Datetime Arithmetic
-```````````````````
-
-The 1.6 implementation of datetime only works correctly for a small subset of
-arithmetic operations. Here we show some simple cases.::
-
- # NumPy 1.6.1, produces invalid results if units are incompatible
- >>> a = np.array(['1979-03-22T12'], dtype='M8[h]')
- >>> b = np.array([3*60], dtype='m8[m]')
- >>> a + b
- array([1970-01-01 00:00:00.080988], dtype=datetime64[us])
- # NumPy 1.7.0, promotes to higher-resolution unit
- >>> a = np.array(['1979-03-22T12'], dtype='M8[h]')
- >>> b = np.array([3*60], dtype='m8[m]')
- >>> a + b
- array(['1979-03-22T15:00-0500'], dtype='datetime64[m]')
-
- # NumPy 1.6.1, arithmetic works if everything is microseconds
- >>> a = np.array(['1979-03-22T12:00'], dtype='M8[us]')
- >>> b = np.array([3*60*60*1000000], dtype='m8[us]')
- >>> a + b
- array([1979-03-22 15:00:00], dtype=datetime64[us])
- # NumPy 1.7.0
- >>> a = np.array(['1979-03-22T12:00'], dtype='M8[us]')
- >>> b = np.array([3*60*60*1000000], dtype='m8[us]')
- >>> a + b
- array(['1979-03-22T15:00:00.000000-0500'], dtype='datetime64[us]')
+.. _pandas: http://pandas.pydata.org \ No newline at end of file
diff --git a/doc/source/reference/arrays.nditer.rst b/doc/source/reference/arrays.nditer.rst
index fa8183f75..7dab09a71 100644
--- a/doc/source/reference/arrays.nditer.rst
+++ b/doc/source/reference/arrays.nditer.rst
@@ -115,13 +115,18 @@ context is exited.
array([[ 0, 2, 4],
[ 6, 8, 10]])
+If you are writing code that needs to support older versions of numpy,
+note that prior to 1.15, :class:`nditer` was not a context manager and
+did not have a `close` method. Instead it relied on the destructor to
+initiate the writeback of the buffer.
+
Using an External Loop
----------------------
In all the examples so far, the elements of `a` are provided by the
iterator one at a time, because all the looping logic is internal to the
-iterator. While this is simple and convenient, it is not very efficient. A
-better approach is to move the one-dimensional innermost loop into your
+iterator. While this is simple and convenient, it is not very efficient.
+A better approach is to move the one-dimensional innermost loop into your
code, external to the iterator. This way, NumPy's vectorized operations
can be used on larger chunks of the elements being visited.
@@ -156,41 +161,29 @@ element in a computation. For example, you may want to visit the
elements of an array in memory order, but use a C-order, Fortran-order,
or multidimensional index to look up values in a different array.
-The Python iterator protocol doesn't have a natural way to query these
-additional values from the iterator, so we introduce an alternate syntax
-for iterating with an :class:`nditer`. This syntax explicitly works
-with the iterator object itself, so its properties are readily accessible
-during iteration. With this looping construct, the current value is
-accessible by indexing into the iterator, and the index being tracked
-is the property `index` or `multi_index` depending on what was requested.
-
-The Python interactive interpreter unfortunately prints out the
-values of expressions inside the while loop during each iteration of the
-loop. We have modified the output in the examples using this looping
-construct in order to be more readable.
+The index is tracked by the iterator object itself, and accessible
+through the `index` or `multi_index` properties, depending on what was
+requested. The examples below show printouts demonstrating the
+progression of the index:
.. admonition:: Example
>>> a = np.arange(6).reshape(2,3)
>>> it = np.nditer(a, flags=['f_index'])
- >>> while not it.finished:
- ... print("%d <%d>" % (it[0], it.index), end=' ')
- ... it.iternext()
+ >>> for x in it:
+ ... print("%d <%d>" % (x, it.index), end=' ')
...
0 <0> 1 <2> 2 <4> 3 <1> 4 <3> 5 <5>
>>> it = np.nditer(a, flags=['multi_index'])
- >>> while not it.finished:
- ... print("%d <%s>" % (it[0], it.multi_index), end=' ')
- ... it.iternext()
+ >>> for x in it:
+ ... print("%d <%s>" % (x, it.multi_index), end=' ')
...
0 <(0, 0)> 1 <(0, 1)> 2 <(0, 2)> 3 <(1, 0)> 4 <(1, 1)> 5 <(1, 2)>
- >>> it = np.nditer(a, flags=['multi_index'], op_flags=['writeonly'])
- >>> with it:
- .... while not it.finished:
- ... it[0] = it.multi_index[1] - it.multi_index[0]
- ... it.iternext()
+ >>> with np.nditer(a, flags=['multi_index'], op_flags=['writeonly']) as it:
+ ... for x in it:
+ ... x[...] = it.multi_index[1] - it.multi_index[0]
...
>>> a
array([[ 0, 1, 2],
@@ -199,7 +192,7 @@ construct in order to be more readable.
Tracking an index or multi-index is incompatible with using an external
loop, because it requires a different index value per element. If
you try to combine these flags, the :class:`nditer` object will
-raise an exception
+raise an exception.
.. admonition:: Example
@@ -209,6 +202,42 @@ raise an exception
File "<stdin>", line 1, in <module>
ValueError: Iterator flag EXTERNAL_LOOP cannot be used if an index or multi-index is being tracked
+Alternative Looping and Element Access
+--------------------------------------
+
+To make its properties more readily accessible during iteration,
+:class:`nditer` has an alternative syntax for iterating, which works
+explicitly with the iterator object itself. With this looping construct,
+the current value is accessible by indexing into the iterator. Other
+properties, such as tracked indices remain as before. The examples below
+produce identical results to the ones in the previous section.
+
+.. admonition:: Example
+
+ >>> a = np.arange(6).reshape(2,3)
+ >>> it = np.nditer(a, flags=['f_index'])
+ >>> while not it.finished:
+ ... print("%d <%d>" % (it[0], it.index), end=' ')
+ ... it.iternext()
+ ...
+ 0 <0> 1 <2> 2 <4> 3 <1> 4 <3> 5 <5>
+
+ >>> it = np.nditer(a, flags=['multi_index'])
+ >>> while not it.finished:
+ ... print("%d <%s>" % (it[0], it.multi_index), end=' ')
+ ... it.iternext()
+ ...
+ 0 <(0, 0)> 1 <(0, 1)> 2 <(0, 2)> 3 <(1, 0)> 4 <(1, 1)> 5 <(1, 2)>
+
+ >>> with np.nditer(a, flags=['multi_index'], op_flags=['writeonly']) as it:
+ ... while not it.finished:
+ ... it[0] = it.multi_index[1] - it.multi_index[0]
+ ... it.iternext()
+ ...
+ >>> a
+ array([[ 0, 1, 2],
+ [-1, 0, 1]])
+
Buffering the Array Elements
----------------------------
diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst
index 1d9d31b83..08bf06b00 100644
--- a/doc/source/reference/c-api/array.rst
+++ b/doc/source/reference/c-api/array.rst
@@ -146,9 +146,9 @@ and itssub-types).
.. c:function:: PyObject *PyArray_GETITEM(PyArrayObject* arr, void* itemptr)
- Get a Python object of a builtin type from the ndarray, *arr*,
+ Get a Python object of a builtin type from the ndarray, *arr*,
at the location pointed to by itemptr. Return ``NULL`` on failure.
-
+
`numpy.ndarray.item` is identical to PyArray_GETITEM.
@@ -226,7 +226,7 @@ From scratch
If *data* is not ``NULL``, then it is assumed to point to the memory
to be used for the array and the *flags* argument is used as the
- new flags for the array (except the state of :c:data:`NPY_OWNDATA`,
+ new flags for the array (except the state of :c:data:`NPY_ARRAY_OWNDATA`,
:c:data:`NPY_ARRAY_WRITEBACKIFCOPY` and :c:data:`NPY_ARRAY_UPDATEIFCOPY`
flags of the new array will be reset).
@@ -997,6 +997,10 @@ argument must be a :c:type:`PyObject *<PyObject>` that can be directly interpret
called on flexible dtypes. Types that are attached to an array will always
be sized, hence the array form of this macro not existing.
+ .. versionchanged:: 1.18
+
+ For structured datatypes with no fields this function now returns False.
+
.. c:function:: PyTypeNum_ISUSERDEF(num)
.. c:function:: PyDataType_ISUSERDEF(descr)
@@ -2793,10 +2797,7 @@ Array Scalars
*arr* is not ``NULL`` and the first element is negative then
:c:data:`NPY_INTNEG_SCALAR` is returned, otherwise
:c:data:`NPY_INTPOS_SCALAR` is returned. The possible return values
- are :c:data:`NPY_{kind}_SCALAR` where ``{kind}`` can be **INTPOS**,
- **INTNEG**, **FLOAT**, **COMPLEX**, **BOOL**, or **OBJECT**.
- :c:data:`NPY_NOSCALAR` is also an enumerated value
- :c:type:`NPY_SCALARKIND` variables can take on.
+ are the enumerated values in :c:type:`NPY_SCALARKIND`.
.. c:function:: int PyArray_CanCoerceScalar( \
char thistype, char neededtype, NPY_SCALARKIND scalar)
@@ -3377,7 +3378,7 @@ Group 1
Useful to release the GIL only if *dtype* does not contain
arbitrary Python objects which may need the Python interpreter
- during execution of the loop. Equivalent to
+ during execution of the loop.
.. c:function:: NPY_END_THREADS_DESCR(PyArray_Descr *dtype)
@@ -3592,11 +3593,21 @@ Enumerated Types
A special variable type indicating the number of "kinds" of
scalars distinguished in determining scalar-coercion rules. This
- variable can take on the values :c:data:`NPY_{KIND}` where ``{KIND}`` can be
+ variable can take on the values:
+
+ .. c:var:: NPY_NOSCALAR
+
+ .. c:var:: NPY_BOOL_SCALAR
+
+ .. c:var:: NPY_INTPOS_SCALAR
+
+ .. c:var:: NPY_INTNEG_SCALAR
+
+ .. c:var:: NPY_FLOAT_SCALAR
+
+ .. c:var:: NPY_COMPLEX_SCALAR
- **NOSCALAR**, **BOOL_SCALAR**, **INTPOS_SCALAR**,
- **INTNEG_SCALAR**, **FLOAT_SCALAR**, **COMPLEX_SCALAR**,
- **OBJECT_SCALAR**
+ .. c:var:: NPY_OBJECT_SCALAR
.. c:var:: NPY_NSCALARKINDS
diff --git a/doc/source/reference/c-api/ufunc.rst b/doc/source/reference/c-api/ufunc.rst
index 92a679510..c9cc60141 100644
--- a/doc/source/reference/c-api/ufunc.rst
+++ b/doc/source/reference/c-api/ufunc.rst
@@ -198,10 +198,10 @@ Functions
to calling PyUFunc_FromFuncAndData. A copy of the string is made,
so the passed in buffer can be freed.
-.. c:function:: PyObject* PyUFunc_FromFuncAndDataAndSignatureAndIdentity(
+.. c:function:: PyObject* PyUFunc_FromFuncAndDataAndSignatureAndIdentity( \
PyUFuncGenericFunction *func, void **data, char *types, int ntypes, \
- int nin, int nout, int identity, char *name, char *doc, int unused, char *signature,
- PyObject *identity_value)
+ int nin, int nout, int identity, char *name, char *doc, int unused, \
+ char *signature, PyObject *identity_value)
This function is very similar to `PyUFunc_FromFuncAndDataAndSignature` above,
but has an extra *identity_value* argument, to define an arbitrary identity
diff --git a/doc/source/reference/random/bit_generators/mt19937.rst b/doc/source/reference/random/bit_generators/mt19937.rst
index 25ba1d7b5..71875db4e 100644
--- a/doc/source/reference/random/bit_generators/mt19937.rst
+++ b/doc/source/reference/random/bit_generators/mt19937.rst
@@ -1,9 +1,7 @@
-Mersenne Twister (MT19937)
+Mersenne Twister (MT19937)
--------------------------
-.. module:: numpy.random.mt19937
-
-.. currentmodule:: numpy.random.mt19937
+.. currentmodule:: numpy.random
.. autoclass:: MT19937
:exclude-members:
diff --git a/doc/source/reference/random/bit_generators/pcg64.rst b/doc/source/reference/random/bit_generators/pcg64.rst
index 7aef1e0dd..5881b7008 100644
--- a/doc/source/reference/random/bit_generators/pcg64.rst
+++ b/doc/source/reference/random/bit_generators/pcg64.rst
@@ -1,9 +1,7 @@
Parallel Congruent Generator (64-bit, PCG64)
--------------------------------------------
-.. module:: numpy.random.pcg64
-
-.. currentmodule:: numpy.random.pcg64
+.. currentmodule:: numpy.random
.. autoclass:: PCG64
:exclude-members:
diff --git a/doc/source/reference/random/bit_generators/philox.rst b/doc/source/reference/random/bit_generators/philox.rst
index 5e581e094..8eba2d351 100644
--- a/doc/source/reference/random/bit_generators/philox.rst
+++ b/doc/source/reference/random/bit_generators/philox.rst
@@ -1,9 +1,7 @@
Philox Counter-based RNG
------------------------
-.. module:: numpy.random.philox
-
-.. currentmodule:: numpy.random.philox
+.. currentmodule:: numpy.random
.. autoclass:: Philox
:exclude-members:
diff --git a/doc/source/reference/random/bit_generators/sfc64.rst b/doc/source/reference/random/bit_generators/sfc64.rst
index dc03820ae..d34124a33 100644
--- a/doc/source/reference/random/bit_generators/sfc64.rst
+++ b/doc/source/reference/random/bit_generators/sfc64.rst
@@ -1,9 +1,7 @@
SFC64 Small Fast Chaotic PRNG
-----------------------------
-.. module:: numpy.random.sfc64
-
-.. currentmodule:: numpy.random.sfc64
+.. currentmodule:: numpy.random
.. autoclass:: SFC64
:exclude-members:
diff --git a/doc/source/reference/random/entropy.rst b/doc/source/reference/random/entropy.rst
deleted file mode 100644
index 0664da6f9..000000000
--- a/doc/source/reference/random/entropy.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-System Entropy
-==============
-
-.. module:: numpy.random.entropy
-
-.. autofunction:: random_entropy
diff --git a/doc/source/reference/random/generator.rst b/doc/source/reference/random/generator.rst
index c3803bcab..068143270 100644
--- a/doc/source/reference/random/generator.rst
+++ b/doc/source/reference/random/generator.rst
@@ -22,63 +22,63 @@ Accessing the BitGenerator
.. autosummary::
:toctree: generated/
- ~Generator.bit_generator
+ ~numpy.random.Generator.bit_generator
Simple random data
==================
.. autosummary::
:toctree: generated/
- ~Generator.integers
- ~Generator.random
- ~Generator.choice
- ~Generator.bytes
+ ~numpy.random.Generator.integers
+ ~numpy.random.Generator.random
+ ~numpy.random.Generator.choice
+ ~numpy.random.Generator.bytes
Permutations
============
.. autosummary::
:toctree: generated/
- ~Generator.shuffle
- ~Generator.permutation
+ ~numpy.random.Generator.shuffle
+ ~numpy.random.Generator.permutation
Distributions
=============
.. autosummary::
:toctree: generated/
- ~Generator.beta
- ~Generator.binomial
- ~Generator.chisquare
- ~Generator.dirichlet
- ~Generator.exponential
- ~Generator.f
- ~Generator.gamma
- ~Generator.geometric
- ~Generator.gumbel
- ~Generator.hypergeometric
- ~Generator.laplace
- ~Generator.logistic
- ~Generator.lognormal
- ~Generator.logseries
- ~Generator.multinomial
- ~Generator.multivariate_normal
- ~Generator.negative_binomial
- ~Generator.noncentral_chisquare
- ~Generator.noncentral_f
- ~Generator.normal
- ~Generator.pareto
- ~Generator.poisson
- ~Generator.power
- ~Generator.rayleigh
- ~Generator.standard_cauchy
- ~Generator.standard_exponential
- ~Generator.standard_gamma
- ~Generator.standard_normal
- ~Generator.standard_t
- ~Generator.triangular
- ~Generator.uniform
- ~Generator.vonmises
- ~Generator.wald
- ~Generator.weibull
- ~Generator.zipf
+ ~numpy.random.Generator.beta
+ ~numpy.random.Generator.binomial
+ ~numpy.random.Generator.chisquare
+ ~numpy.random.Generator.dirichlet
+ ~numpy.random.Generator.exponential
+ ~numpy.random.Generator.f
+ ~numpy.random.Generator.gamma
+ ~numpy.random.Generator.geometric
+ ~numpy.random.Generator.gumbel
+ ~numpy.random.Generator.hypergeometric
+ ~numpy.random.Generator.laplace
+ ~numpy.random.Generator.logistic
+ ~numpy.random.Generator.lognormal
+ ~numpy.random.Generator.logseries
+ ~numpy.random.Generator.multinomial
+ ~numpy.random.Generator.multivariate_normal
+ ~numpy.random.Generator.negative_binomial
+ ~numpy.random.Generator.noncentral_chisquare
+ ~numpy.random.Generator.noncentral_f
+ ~numpy.random.Generator.normal
+ ~numpy.random.Generator.pareto
+ ~numpy.random.Generator.poisson
+ ~numpy.random.Generator.power
+ ~numpy.random.Generator.rayleigh
+ ~numpy.random.Generator.standard_cauchy
+ ~numpy.random.Generator.standard_exponential
+ ~numpy.random.Generator.standard_gamma
+ ~numpy.random.Generator.standard_normal
+ ~numpy.random.Generator.standard_t
+ ~numpy.random.Generator.triangular
+ ~numpy.random.Generator.uniform
+ ~numpy.random.Generator.vonmises
+ ~numpy.random.Generator.wald
+ ~numpy.random.Generator.weibull
+ ~numpy.random.Generator.zipf
diff --git a/doc/source/reference/random/index.rst b/doc/source/reference/random/index.rst
index 5b4dcf567..b0283f3a7 100644
--- a/doc/source/reference/random/index.rst
+++ b/doc/source/reference/random/index.rst
@@ -34,7 +34,7 @@ Quick Start
By default, `~Generator` uses bits provided by `~pcg64.PCG64` which
has better statistical properties than the legacy mt19937 random
-number generator in `~.RandomState`
+number generator in `~.RandomState`.
.. code-block:: python
@@ -151,9 +151,6 @@ What's New or Different
select distributions
* Optional ``out`` argument that allows existing arrays to be filled for
select distributions
-* `~entropy.random_entropy` provides access to the system
- source of randomness that is used in cryptographic applications (e.g.,
- ``/dev/urandom`` on Unix).
* All BitGenerators can produce doubles, uint64s and uint32s via CTypes
(`~.PCG64.ctypes`) and CFFI (`~.PCG64.cffi`). This allows the bit generators
to be used in numba.
@@ -190,7 +187,7 @@ Concepts
:maxdepth: 1
generator
- legacy mtrand <legacy>
+ Legacy Generator (RandomState) <legacy>
BitGenerators, SeedSequences <bit_generators/index>
Features
@@ -203,7 +200,6 @@ Features
new-or-different
Comparing Performance <performance>
extending
- Reading System Entropy <entropy>
Original Source
~~~~~~~~~~~~~~~
diff --git a/doc/source/reference/random/legacy.rst b/doc/source/reference/random/legacy.rst
index 04d4d3569..413a42727 100644
--- a/doc/source/reference/random/legacy.rst
+++ b/doc/source/reference/random/legacy.rst
@@ -4,7 +4,7 @@
Legacy Random Generation
------------------------
-The `~mtrand.RandomState` provides access to
+The `RandomState` provides access to
legacy generators. This generator is considered frozen and will have
no further improvements. It is guaranteed to produce the same values
as the final point release of NumPy v1.16. These all depend on Box-Muller
@@ -12,19 +12,19 @@ normals or inverse CDF exponentials or gammas. This class should only be used
if it is essential to have randoms that are identical to what
would have been produced by previous versions of NumPy.
-`~mtrand.RandomState` adds additional information
+`RandomState` adds additional information
to the state which is required when using Box-Muller normals since these
are produced in pairs. It is important to use
-`~mtrand.RandomState.get_state`, and not the underlying bit generators
+`RandomState.get_state`, and not the underlying bit generators
`state`, when accessing the state so that these extra values are saved.
-Although we provide the `~mt19937.MT19937` BitGenerator for use independent of
-`~mtrand.RandomState`, note that its default seeding uses `~SeedSequence`
-rather than the legacy seeding algorithm. `~mtrand.RandomState` will use the
+Although we provide the `MT19937` BitGenerator for use independent of
+`RandomState`, note that its default seeding uses `SeedSequence`
+rather than the legacy seeding algorithm. `RandomState` will use the
legacy seeding algorithm. The methods to use the legacy seeding algorithm are
currently private as the main reason to use them is just to implement
-`~mtrand.RandomState`. However, one can reset the state of `~mt19937.MT19937`
-using the state of the `~mtrand.RandomState`:
+`RandomState`. However, one can reset the state of `MT19937`
+using the state of the `RandomState`:
.. code-block:: python
@@ -47,8 +47,6 @@ using the state of the `~mtrand.RandomState`:
rs2.standard_exponential()
-.. currentmodule:: numpy.random.mtrand
-
.. autoclass:: RandomState
:exclude-members:
diff --git a/doc/source/reference/random/new-or-different.rst b/doc/source/reference/random/new-or-different.rst
index 5442f46c9..c8815f98f 100644
--- a/doc/source/reference/random/new-or-different.rst
+++ b/doc/source/reference/random/new-or-different.rst
@@ -45,9 +45,6 @@ Feature Older Equivalent Notes
And in more detail:
-* `~.entropy.random_entropy` provides access to the system
- source of randomness that is used in cryptographic applications (e.g.,
- ``/dev/urandom`` on Unix).
* Simulate from the complex normal distribution
(`~.Generator.complex_normal`)
* The normal, exponential and gamma generators use 256-step Ziggurat
diff --git a/doc/source/reference/routines.ma.rst b/doc/source/reference/routines.ma.rst
index 491bb6bff..5b2098c7a 100644
--- a/doc/source/reference/routines.ma.rst
+++ b/doc/source/reference/routines.ma.rst
@@ -264,17 +264,6 @@ Conversion operations
ma.MaskedArray.tobytes
-Pickling and unpickling
-~~~~~~~~~~~~~~~~~~~~~~~
-.. autosummary::
- :toctree: generated/
-
- ma.dump
- ma.dumps
- ma.load
- ma.loads
-
-
Filling a masked array
~~~~~~~~~~~~~~~~~~~~~~
.. autosummary::
diff --git a/doc/source/reference/routines.testing.rst b/doc/source/reference/routines.testing.rst
index c676dec07..98ce3f377 100644
--- a/doc/source/reference/routines.testing.rst
+++ b/doc/source/reference/routines.testing.rst
@@ -37,11 +37,11 @@ Decorators
.. autosummary::
:toctree: generated/
- decorators.deprecated
- decorators.knownfailureif
- decorators.setastest
- decorators.skipif
- decorators.slow
+ dec.deprecated
+ dec.knownfailureif
+ dec.setastest
+ dec.skipif
+ dec.slow
decorate_methods
Test Running
diff --git a/doc/source/reference/ufuncs.rst b/doc/source/reference/ufuncs.rst
index d00e88b34..3a3b67632 100644
--- a/doc/source/reference/ufuncs.rst
+++ b/doc/source/reference/ufuncs.rst
@@ -228,46 +228,47 @@ can generate this table for your system with the code given in the Figure.
.. admonition:: Figure
- Code segment showing the "can cast safely" table for a 32-bit system.
+ Code segment showing the "can cast safely" table for a 64-bit system.
+ Generally the output depends on the system; your system might result in
+ a different table.
+ >>> mark = {False: ' -', True: ' Y'}
>>> def print_table(ntypes):
- ... print 'X',
- ... for char in ntypes: print char,
- ... print
+ ... print('X ' + ' '.join(ntypes))
... for row in ntypes:
- ... print row,
+ ... print(row, end='')
... for col in ntypes:
- ... print int(np.can_cast(row, col)),
- ... print
+ ... print(mark[np.can_cast(row, col)], end='')
+ ... print()
+ ...
>>> print_table(np.typecodes['All'])
X ? b h i l q p B H I L Q P e f d g F D G S U V O M m
- ? 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
- b 0 1 1 1 1 1 1 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 0 0
- h 0 0 1 1 1 1 1 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 0 0
- i 0 0 0 1 1 1 1 0 0 0 0 0 0 0 0 1 1 0 1 1 1 1 1 1 0 0
- l 0 0 0 0 1 1 1 0 0 0 0 0 0 0 0 1 1 0 1 1 1 1 1 1 0 0
- q 0 0 0 0 1 1 1 0 0 0 0 0 0 0 0 1 1 0 1 1 1 1 1 1 0 0
- p 0 0 0 0 1 1 1 0 0 0 0 0 0 0 0 1 1 0 1 1 1 1 1 1 0 0
- B 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0
- H 0 0 0 1 1 1 1 0 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 0 0
- I 0 0 0 0 1 1 1 0 0 1 1 1 1 0 0 1 1 0 1 1 1 1 1 1 0 0
- L 0 0 0 0 0 0 0 0 0 0 1 1 1 0 0 1 1 0 1 1 1 1 1 1 0 0
- Q 0 0 0 0 0 0 0 0 0 0 1 1 1 0 0 1 1 0 1 1 1 1 1 1 0 0
- P 0 0 0 0 0 0 0 0 0 0 1 1 1 0 0 1 1 0 1 1 1 1 1 1 0 0
- e 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 0 0
- f 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 0 0
- d 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 1 1 1 1 1 1 0 0
- g 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 1 1 1 1 1 0 0
- F 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 0 0
- D 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 0 0
- G 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 0 0
- S 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 0 0
- U 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 0 0
- V 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0
- O 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0
- M 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0
- m 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1
-
+ ? Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y - Y
+ b - Y Y Y Y Y Y - - - - - - Y Y Y Y Y Y Y Y Y Y Y - Y
+ h - - Y Y Y Y Y - - - - - - - Y Y Y Y Y Y Y Y Y Y - Y
+ i - - - Y Y Y Y - - - - - - - - Y Y - Y Y Y Y Y Y - Y
+ l - - - - Y Y Y - - - - - - - - Y Y - Y Y Y Y Y Y - Y
+ q - - - - Y Y Y - - - - - - - - Y Y - Y Y Y Y Y Y - Y
+ p - - - - Y Y Y - - - - - - - - Y Y - Y Y Y Y Y Y - Y
+ B - - Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y - Y
+ H - - - Y Y Y Y - Y Y Y Y Y - Y Y Y Y Y Y Y Y Y Y - Y
+ I - - - - Y Y Y - - Y Y Y Y - - Y Y - Y Y Y Y Y Y - Y
+ L - - - - - - - - - - Y Y Y - - Y Y - Y Y Y Y Y Y - Y
+ Q - - - - - - - - - - Y Y Y - - Y Y - Y Y Y Y Y Y - Y
+ P - - - - - - - - - - Y Y Y - - Y Y - Y Y Y Y Y Y - Y
+ e - - - - - - - - - - - - - Y Y Y Y Y Y Y Y Y Y Y - -
+ f - - - - - - - - - - - - - - Y Y Y Y Y Y Y Y Y Y - -
+ d - - - - - - - - - - - - - - - Y Y - Y Y Y Y Y Y - -
+ g - - - - - - - - - - - - - - - - Y - - Y Y Y Y Y - -
+ F - - - - - - - - - - - - - - - - - Y Y Y Y Y Y Y - -
+ D - - - - - - - - - - - - - - - - - - Y Y Y Y Y Y - -
+ G - - - - - - - - - - - - - - - - - - - Y Y Y Y Y - -
+ S - - - - - - - - - - - - - - - - - - - - Y Y Y Y - -
+ U - - - - - - - - - - - - - - - - - - - - - Y Y Y - -
+ V - - - - - - - - - - - - - - - - - - - - - - Y Y - -
+ O - - - - - - - - - - - - - - - - - - - - - - Y Y - -
+ M - - - - - - - - - - - - - - - - - - - - - - Y Y Y -
+ m - - - - - - - - - - - - - - - - - - - - - - Y Y - Y
You should note that, while included in the table for completeness,
the 'S', 'U', and 'V' types cannot be operated on by ufuncs. Also,
diff --git a/doc/source/release.rst b/doc/source/release.rst
index 8dfb8db1d..fb4e2b14d 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -2,52 +2,58 @@
Release Notes
*************
-.. include:: ../release/1.18.0-notes.rst
-.. include:: ../release/1.17.0-notes.rst
-.. include:: ../release/1.16.4-notes.rst
-.. include:: ../release/1.16.3-notes.rst
-.. include:: ../release/1.16.2-notes.rst
-.. include:: ../release/1.16.1-notes.rst
-.. include:: ../release/1.16.0-notes.rst
-.. include:: ../release/1.15.4-notes.rst
-.. include:: ../release/1.15.3-notes.rst
-.. include:: ../release/1.15.2-notes.rst
-.. include:: ../release/1.15.1-notes.rst
-.. include:: ../release/1.15.0-notes.rst
-.. include:: ../release/1.14.6-notes.rst
-.. include:: ../release/1.14.5-notes.rst
-.. include:: ../release/1.14.4-notes.rst
-.. include:: ../release/1.14.3-notes.rst
-.. include:: ../release/1.14.2-notes.rst
-.. include:: ../release/1.14.1-notes.rst
-.. include:: ../release/1.14.0-notes.rst
-.. include:: ../release/1.13.3-notes.rst
-.. include:: ../release/1.13.2-notes.rst
-.. include:: ../release/1.13.1-notes.rst
-.. include:: ../release/1.13.0-notes.rst
-.. include:: ../release/1.12.1-notes.rst
-.. include:: ../release/1.12.0-notes.rst
-.. include:: ../release/1.11.3-notes.rst
-.. include:: ../release/1.11.2-notes.rst
-.. include:: ../release/1.11.1-notes.rst
-.. include:: ../release/1.11.0-notes.rst
-.. include:: ../release/1.10.4-notes.rst
-.. include:: ../release/1.10.3-notes.rst
-.. include:: ../release/1.10.2-notes.rst
-.. include:: ../release/1.10.1-notes.rst
-.. include:: ../release/1.10.0-notes.rst
-.. include:: ../release/1.9.2-notes.rst
-.. include:: ../release/1.9.1-notes.rst
-.. include:: ../release/1.9.0-notes.rst
-.. include:: ../release/1.8.2-notes.rst
-.. include:: ../release/1.8.1-notes.rst
-.. include:: ../release/1.8.0-notes.rst
-.. include:: ../release/1.7.2-notes.rst
-.. include:: ../release/1.7.1-notes.rst
-.. include:: ../release/1.7.0-notes.rst
-.. include:: ../release/1.6.2-notes.rst
-.. include:: ../release/1.6.1-notes.rst
-.. include:: ../release/1.6.0-notes.rst
-.. include:: ../release/1.5.0-notes.rst
-.. include:: ../release/1.4.0-notes.rst
-.. include:: ../release/1.3.0-notes.rst
+.. toctree::
+ :maxdepth: 3
+
+ 1.18.0 <release/1.18.0-notes>
+ 1.17.2 <release/1.17.2-notes>
+ 1.17.1 <release/1.17.1-notes>
+ 1.17.0 <release/1.17.0-notes>
+ 1.16.5 <release/1.16.5-notes>
+ 1.16.4 <release/1.16.4-notes>
+ 1.16.3 <release/1.16.3-notes>
+ 1.16.2 <release/1.16.2-notes>
+ 1.16.1 <release/1.16.1-notes>
+ 1.16.0 <release/1.16.0-notes>
+ 1.15.4 <release/1.15.4-notes>
+ 1.15.3 <release/1.15.3-notes>
+ 1.15.2 <release/1.15.2-notes>
+ 1.15.1 <release/1.15.1-notes>
+ 1.15.0 <release/1.15.0-notes>
+ 1.14.6 <release/1.14.6-notes>
+ 1.14.5 <release/1.14.5-notes>
+ 1.14.4 <release/1.14.4-notes>
+ 1.14.3 <release/1.14.3-notes>
+ 1.14.2 <release/1.14.2-notes>
+ 1.14.1 <release/1.14.1-notes>
+ 1.14.0 <release/1.14.0-notes>
+ 1.13.3 <release/1.13.3-notes>
+ 1.13.2 <release/1.13.2-notes>
+ 1.13.1 <release/1.13.1-notes>
+ 1.13.0 <release/1.13.0-notes>
+ 1.12.1 <release/1.12.1-notes>
+ 1.12.0 <release/1.12.0-notes>
+ 1.11.3 <release/1.11.3-notes>
+ 1.11.2 <release/1.11.2-notes>
+ 1.11.1 <release/1.11.1-notes>
+ 1.11.0 <release/1.11.0-notes>
+ 1.10.4 <release/1.10.4-notes>
+ 1.10.3 <release/1.10.3-notes>
+ 1.10.2 <release/1.10.2-notes>
+ 1.10.1 <release/1.10.1-notes>
+ 1.10.0 <release/1.10.0-notes>
+ 1.9.2 <release/1.9.2-notes>
+ 1.9.1 <release/1.9.1-notes>
+ 1.9.0 <release/1.9.0-notes>
+ 1.8.2 <release/1.8.2-notes>
+ 1.8.1 <release/1.8.1-notes>
+ 1.8.0 <release/1.8.0-notes>
+ 1.7.2 <release/1.7.2-notes>
+ 1.7.1 <release/1.7.1-notes>
+ 1.7.0 <release/1.7.0-notes>
+ 1.6.2 <release/1.6.2-notes>
+ 1.6.1 <release/1.6.1-notes>
+ 1.6.0 <release/1.6.0-notes>
+ 1.5.0 <release/1.5.0-notes>
+ 1.4.0 <release/1.4.0-notes>
+ 1.3.0 <release/1.3.0-notes>
diff --git a/doc/release/1.10.0-notes.rst b/doc/source/release/1.10.0-notes.rst
index 88062e463..88062e463 100644
--- a/doc/release/1.10.0-notes.rst
+++ b/doc/source/release/1.10.0-notes.rst
diff --git a/doc/release/1.10.1-notes.rst b/doc/source/release/1.10.1-notes.rst
index 4e541d279..4e541d279 100644
--- a/doc/release/1.10.1-notes.rst
+++ b/doc/source/release/1.10.1-notes.rst
diff --git a/doc/release/1.10.2-notes.rst b/doc/source/release/1.10.2-notes.rst
index 8c26b463c..8c26b463c 100644
--- a/doc/release/1.10.2-notes.rst
+++ b/doc/source/release/1.10.2-notes.rst
diff --git a/doc/release/1.10.3-notes.rst b/doc/source/release/1.10.3-notes.rst
index 0d4df4ce6..0d4df4ce6 100644
--- a/doc/release/1.10.3-notes.rst
+++ b/doc/source/release/1.10.3-notes.rst
diff --git a/doc/release/1.10.4-notes.rst b/doc/source/release/1.10.4-notes.rst
index 481928ca7..481928ca7 100644
--- a/doc/release/1.10.4-notes.rst
+++ b/doc/source/release/1.10.4-notes.rst
diff --git a/doc/release/1.11.0-notes.rst b/doc/source/release/1.11.0-notes.rst
index 166502ac5..166502ac5 100644
--- a/doc/release/1.11.0-notes.rst
+++ b/doc/source/release/1.11.0-notes.rst
diff --git a/doc/release/1.11.1-notes.rst b/doc/source/release/1.11.1-notes.rst
index 6303c32f0..6303c32f0 100644
--- a/doc/release/1.11.1-notes.rst
+++ b/doc/source/release/1.11.1-notes.rst
diff --git a/doc/release/1.11.2-notes.rst b/doc/source/release/1.11.2-notes.rst
index c954089d5..c954089d5 100644
--- a/doc/release/1.11.2-notes.rst
+++ b/doc/source/release/1.11.2-notes.rst
diff --git a/doc/release/1.11.3-notes.rst b/doc/source/release/1.11.3-notes.rst
index 8381a97f7..8381a97f7 100644
--- a/doc/release/1.11.3-notes.rst
+++ b/doc/source/release/1.11.3-notes.rst
diff --git a/doc/release/1.12.0-notes.rst b/doc/source/release/1.12.0-notes.rst
index 711055d16..711055d16 100644
--- a/doc/release/1.12.0-notes.rst
+++ b/doc/source/release/1.12.0-notes.rst
diff --git a/doc/release/1.12.1-notes.rst b/doc/source/release/1.12.1-notes.rst
index f67dab108..f67dab108 100644
--- a/doc/release/1.12.1-notes.rst
+++ b/doc/source/release/1.12.1-notes.rst
diff --git a/doc/release/1.13.0-notes.rst b/doc/source/release/1.13.0-notes.rst
index 3b719db09..3b719db09 100644
--- a/doc/release/1.13.0-notes.rst
+++ b/doc/source/release/1.13.0-notes.rst
diff --git a/doc/release/1.13.1-notes.rst b/doc/source/release/1.13.1-notes.rst
index 88a4bc3dd..88a4bc3dd 100644
--- a/doc/release/1.13.1-notes.rst
+++ b/doc/source/release/1.13.1-notes.rst
diff --git a/doc/release/1.13.2-notes.rst b/doc/source/release/1.13.2-notes.rst
index f2f9120f5..f2f9120f5 100644
--- a/doc/release/1.13.2-notes.rst
+++ b/doc/source/release/1.13.2-notes.rst
diff --git a/doc/release/1.13.3-notes.rst b/doc/source/release/1.13.3-notes.rst
index 7f7170bcc..7f7170bcc 100644
--- a/doc/release/1.13.3-notes.rst
+++ b/doc/source/release/1.13.3-notes.rst
diff --git a/doc/release/1.14.0-notes.rst b/doc/source/release/1.14.0-notes.rst
index 462631de6..462631de6 100644
--- a/doc/release/1.14.0-notes.rst
+++ b/doc/source/release/1.14.0-notes.rst
diff --git a/doc/release/1.14.1-notes.rst b/doc/source/release/1.14.1-notes.rst
index 7b95c2e28..7b95c2e28 100644
--- a/doc/release/1.14.1-notes.rst
+++ b/doc/source/release/1.14.1-notes.rst
diff --git a/doc/release/1.14.2-notes.rst b/doc/source/release/1.14.2-notes.rst
index 3f47cb5f5..3f47cb5f5 100644
--- a/doc/release/1.14.2-notes.rst
+++ b/doc/source/release/1.14.2-notes.rst
diff --git a/doc/release/1.14.3-notes.rst b/doc/source/release/1.14.3-notes.rst
index 60b631168..60b631168 100644
--- a/doc/release/1.14.3-notes.rst
+++ b/doc/source/release/1.14.3-notes.rst
diff --git a/doc/release/1.14.4-notes.rst b/doc/source/release/1.14.4-notes.rst
index 3fb94383b..3fb94383b 100644
--- a/doc/release/1.14.4-notes.rst
+++ b/doc/source/release/1.14.4-notes.rst
diff --git a/doc/release/1.14.5-notes.rst b/doc/source/release/1.14.5-notes.rst
index 9a97cc033..9a97cc033 100644
--- a/doc/release/1.14.5-notes.rst
+++ b/doc/source/release/1.14.5-notes.rst
diff --git a/doc/release/1.14.6-notes.rst b/doc/source/release/1.14.6-notes.rst
index ac6a78272..ac6a78272 100644
--- a/doc/release/1.14.6-notes.rst
+++ b/doc/source/release/1.14.6-notes.rst
diff --git a/doc/release/1.15.0-notes.rst b/doc/source/release/1.15.0-notes.rst
index 7235ca915..7235ca915 100644
--- a/doc/release/1.15.0-notes.rst
+++ b/doc/source/release/1.15.0-notes.rst
diff --git a/doc/release/1.15.1-notes.rst b/doc/source/release/1.15.1-notes.rst
index ddb83303c..ddb83303c 100644
--- a/doc/release/1.15.1-notes.rst
+++ b/doc/source/release/1.15.1-notes.rst
diff --git a/doc/release/1.15.2-notes.rst b/doc/source/release/1.15.2-notes.rst
index a3e61fccd..a3e61fccd 100644
--- a/doc/release/1.15.2-notes.rst
+++ b/doc/source/release/1.15.2-notes.rst
diff --git a/doc/release/1.15.3-notes.rst b/doc/source/release/1.15.3-notes.rst
index 753eecec9..753eecec9 100644
--- a/doc/release/1.15.3-notes.rst
+++ b/doc/source/release/1.15.3-notes.rst
diff --git a/doc/release/1.15.4-notes.rst b/doc/source/release/1.15.4-notes.rst
index 033bd5828..033bd5828 100644
--- a/doc/release/1.15.4-notes.rst
+++ b/doc/source/release/1.15.4-notes.rst
diff --git a/doc/release/1.16.0-notes.rst b/doc/source/release/1.16.0-notes.rst
index 1034d6e6c..1034d6e6c 100644
--- a/doc/release/1.16.0-notes.rst
+++ b/doc/source/release/1.16.0-notes.rst
diff --git a/doc/release/1.16.1-notes.rst b/doc/source/release/1.16.1-notes.rst
index 2a190ef91..2a190ef91 100644
--- a/doc/release/1.16.1-notes.rst
+++ b/doc/source/release/1.16.1-notes.rst
diff --git a/doc/release/1.16.2-notes.rst b/doc/source/release/1.16.2-notes.rst
index 62b90dc40..62b90dc40 100644
--- a/doc/release/1.16.2-notes.rst
+++ b/doc/source/release/1.16.2-notes.rst
diff --git a/doc/release/1.16.3-notes.rst b/doc/source/release/1.16.3-notes.rst
index 181a7264d..181a7264d 100644
--- a/doc/release/1.16.3-notes.rst
+++ b/doc/source/release/1.16.3-notes.rst
diff --git a/doc/release/1.16.4-notes.rst b/doc/source/release/1.16.4-notes.rst
index a236b05c8..a236b05c8 100644
--- a/doc/release/1.16.4-notes.rst
+++ b/doc/source/release/1.16.4-notes.rst
diff --git a/doc/source/release/1.16.5-notes.rst b/doc/source/release/1.16.5-notes.rst
new file mode 100644
index 000000000..5b6eb585b
--- /dev/null
+++ b/doc/source/release/1.16.5-notes.rst
@@ -0,0 +1,68 @@
+==========================
+NumPy 1.16.5 Release Notes
+==========================
+
+The NumPy 1.16.5 release fixes bugs reported against the 1.16.4 release, and
+also backports several enhancements from master that seem appropriate for a
+release series that is the last to support Python 2.7. The wheels on PyPI are
+linked with OpenBLAS v0.3.7-dev, which should fix errors on Skylake series
+cpus.
+
+Downstream developers building this release should use Cython >= 0.29.2 and, if
+using OpenBLAS, OpenBLAS >= v0.3.7. The supported Python versions are 2.7 and
+3.5-3.7.
+
+
+Contributors
+============
+
+A total of 18 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Alexander Shadchin
+* Allan Haldane
+* Bruce Merry +
+* Charles Harris
+* Colin Snyder +
+* Dan Allan +
+* Emile +
+* Eric Wieser
+* Grey Baker +
+* Maksim Shabunin +
+* Marten van Kerkwijk
+* Matti Picus
+* Peter Andreas Entschev +
+* Ralf Gommers
+* Richard Harris +
+* Sebastian Berg
+* Sergei Lebedev +
+* Stephan Hoyer
+
+Pull requests merged
+====================
+
+A total of 23 pull requests were merged for this release.
+
+* `#13742 <https://github.com/numpy/numpy/pull/13742>`__: ENH: Add project URLs to setup.py
+* `#13823 <https://github.com/numpy/numpy/pull/13823>`__: TEST, ENH: fix tests and ctypes code for PyPy
+* `#13845 <https://github.com/numpy/numpy/pull/13845>`__: BUG: use npy_intp instead of int for indexing array
+* `#13867 <https://github.com/numpy/numpy/pull/13867>`__: TST: Ignore DeprecationWarning during nose imports
+* `#13905 <https://github.com/numpy/numpy/pull/13905>`__: BUG: Fix use-after-free in boolean indexing
+* `#13933 <https://github.com/numpy/numpy/pull/13933>`__: MAINT/BUG/DOC: Fix errors in _add_newdocs
+* `#13984 <https://github.com/numpy/numpy/pull/13984>`__: BUG: fix byte order reversal for datetime64[ns]
+* `#13994 <https://github.com/numpy/numpy/pull/13994>`__: MAINT,BUG: Use nbytes to also catch empty descr during allocation
+* `#14042 <https://github.com/numpy/numpy/pull/14042>`__: BUG: np.array cleared errors occurred in PyMemoryView_FromObject
+* `#14043 <https://github.com/numpy/numpy/pull/14043>`__: BUG: Fixes for Undefined Behavior Sanitizer (UBSan) errors.
+* `#14044 <https://github.com/numpy/numpy/pull/14044>`__: BUG: ensure that casting to/from structured is properly checked.
+* `#14045 <https://github.com/numpy/numpy/pull/14045>`__: MAINT: fix histogram*d dispatchers
+* `#14046 <https://github.com/numpy/numpy/pull/14046>`__: BUG: further fixup to histogram2d dispatcher.
+* `#14052 <https://github.com/numpy/numpy/pull/14052>`__: BUG: Replace contextlib.suppress for Python 2.7
+* `#14056 <https://github.com/numpy/numpy/pull/14056>`__: BUG: fix compilation of 3rd party modules with Py_LIMITED_API...
+* `#14057 <https://github.com/numpy/numpy/pull/14057>`__: BUG: Fix memory leak in dtype from dict constructor
+* `#14058 <https://github.com/numpy/numpy/pull/14058>`__: DOC: Document array_function at a higher level.
+* `#14084 <https://github.com/numpy/numpy/pull/14084>`__: BUG, DOC: add new recfunctions to `__all__`
+* `#14162 <https://github.com/numpy/numpy/pull/14162>`__: BUG: Remove stray print that causes a SystemError on python 3.7
+* `#14297 <https://github.com/numpy/numpy/pull/14297>`__: TST: Pin pytest version to 5.0.1.
+* `#14322 <https://github.com/numpy/numpy/pull/14322>`__: ENH: Enable huge pages in all Linux builds
+* `#14346 <https://github.com/numpy/numpy/pull/14346>`__: BUG: fix behavior of structured_to_unstructured on non-trivial...
+* `#14382 <https://github.com/numpy/numpy/pull/14382>`__: REL: Prepare for the NumPy 1.16.5 release.
diff --git a/doc/release/1.17.0-notes.rst b/doc/source/release/1.17.0-notes.rst
index 303f02464..8d69e36d9 100644
--- a/doc/release/1.17.0-notes.rst
+++ b/doc/source/release/1.17.0-notes.rst
@@ -1,3 +1,5 @@
+.. currentmodule:: numpy
+
==========================
NumPy 1.17.0 Release Notes
==========================
@@ -5,10 +7,10 @@ NumPy 1.17.0 Release Notes
This NumPy release contains a number of new features that should substantially
improve its performance and usefulness, see Highlights below for a summary. The
Python versions supported are 3.5-3.7, note that Python 2.7 has been dropped.
-Python 3.8b1 should work with the released source packages, but there are no
+Python 3.8b2 should work with the released source packages, but there are no
future guarantees.
-Downstream developers should use Cython >= 0.29.10 for Python 3.8 support and
+Downstream developers should use Cython >= 0.29.11 for Python 3.8 support and
OpenBLAS >= 3.7 (not currently out) to avoid problems on the Skylake
architecture. The NumPy wheels on PyPI are built from the OpenBLAS development
branch in order to avoid those problems.
@@ -17,17 +19,19 @@ branch in order to avoid those problems.
Highlights
==========
-* A new extensible random module along with four selectable random number
- generators and improved seeding designed for use in parallel processes has
- been added. The currently available bit generators are MT19937, PCG64,
- Philox, and SFC64. See below under New Features.
+* A new extensible `random` module along with four selectable `random number
+ generators <random.BitGenerators>` and improved seeding designed for use in parallel
+ processes has been added. The currently available bit generators are `MT19937
+ <random.mt19937.MT19937>`, `PCG64 <random.pcg64.PCG64>`, `Philox
+ <random.philox.Philox>`, and `SFC64 <random.sfc64.SFC64>`. See below under
+ New Features.
-* NumPy's FFT implementation was changed from fftpack to pocketfft, resulting
- in faster, more accurate transforms and better handling of datasets of
- prime length. See below under Improvements.
+* NumPy's `FFT <fft>` implementation was changed from fftpack to pocketfft,
+ resulting in faster, more accurate transforms and better handling of datasets
+ of prime length. See below under Improvements.
* New radix sort and timsort sorting methods. It is currently not possible to
- choose which will be used, but they are hardwired to the datatype and used
+ choose which will be used. They are hardwired to the datatype and used
when either ``stable`` or ``mergesort`` is passed as the method. See below
under Improvements.
@@ -44,8 +48,8 @@ New functions
Deprecations
============
-``np.polynomial`` functions warn when passed ``float`` in place of ``int``
---------------------------------------------------------------------------
+`numpy.polynomial` functions warn when passed ``float`` in place of ``int``
+---------------------------------------------------------------------------
Previously functions in this module would accept ``float`` values provided they
were integral (``1.0``, ``2.0``, etc). For consistency with the rest of numpy,
doing so is now deprecated, and in future will raise a ``TypeError``.
@@ -53,11 +57,11 @@ doing so is now deprecated, and in future will raise a ``TypeError``.
Similarly, passing a float like ``0.5`` in place of an integer will now raise a
``TypeError`` instead of the previous ``ValueError``.
-Deprecate ``numpy.distutils.exec_command`` and ``numpy.distutils.temp_file_name``
----------------------------------------------------------------------------------
+Deprecate `numpy.distutils.exec_command` and ``temp_file_name``
+---------------------------------------------------------------
The internal use of these functions has been refactored and there are better
-alternatives. Relace ``exec_command`` with `subprocess.Popen` and
-``temp_file_name`` with `tempfile.mkstemp`.
+alternatives. Replace ``exec_command`` with `subprocess.Popen` and
+`temp_file_name <numpy.distutils.exec_command>` with `tempfile.mkstemp`.
Writeable flag of C-API wrapped arrays
--------------------------------------
@@ -71,10 +75,11 @@ a manner are very rare in practice and only available through the NumPy C-API.
`numpy.nonzero` should no longer be called on 0d arrays
-------------------------------------------------------
-The behavior of nonzero on 0d arrays was surprising, making uses of it almost
-always incorrect. If the old behavior was intended, it can be preserved without
-a warning by using ``nonzero(atleast_1d(arr))`` instead of ``nonzero(arr)``.
-In a future release, it is most likely this will raise a `ValueError`.
+The behavior of `numpy.nonzero` on 0d arrays was surprising, making uses of it
+almost always incorrect. If the old behavior was intended, it can be preserved
+without a warning by using ``nonzero(atleast_1d(arr))`` instead of
+``nonzero(arr)``. In a future release, it is most likely this will raise a
+``ValueError``.
Writing to the result of `numpy.broadcast_arrays` will warn
-----------------------------------------------------------
@@ -91,6 +96,12 @@ produce the deprecation warning. To help alleviate confusion, an additional
`FutureWarning` will be emitted when accessing the ``writeable`` flag state to
clarify the contradiction.
+Note that for the C-side buffer protocol such an array will return a
+readonly buffer immediately unless a writable buffer is requested. If
+a writeable buffer is requested a warning will be given. When using
+cython, the ``const`` qualifier should be used with such arrays to avoid
+the warning (e.g. ``cdef const double[::1] view``).
+
Future Changes
==============
@@ -110,15 +121,15 @@ dtype, (1,))]`` or ``"(1,)type"`` (consistently with ``[(name, dtype, n)]``
Compatibility notes
===================
-float16 subnormal rounding
---------------------------
-Casting from a different floating point precision to float16 used incorrect
+``float16`` subnormal rounding
+------------------------------
+Casting from a different floating point precision to ``float16`` used incorrect
rounding in some edge cases. This means in rare cases, subnormal results will
now be rounded up instead of down, changing the last bit (ULP) of the result.
Signed zero when using divmod
-----------------------------
-Starting in version 1.12.0, numpy incorrectly returned a negatively signed zero
+Starting in version 1.12.0, numpy incorrectly returned a negatively signed zero
when using the ``divmod`` and ``floor_divide`` functions when the result was
zero. For example::
@@ -134,8 +145,9 @@ zero::
``MaskedArray.mask`` now returns a view of the mask, not the mask itself
------------------------------------------------------------------------
Returning the mask itself was unsafe, as it could be reshaped in place which
-would violate expectations of the masked array code. It's behavior is now
-consistent with the ``.data`` attribute, which also returns a view.
+would violate expectations of the masked array code. The behavior of `mask
+<ma.MaskedArray.mask>` is now consistent with `data <ma.MaskedArray.data>`,
+which also returns a view.
The underlying mask can still be accessed with ``._mask`` if it is needed.
Tests that contain ``assert x.mask is not y.mask`` or similar will need to be
@@ -147,30 +159,36 @@ Looking up ``__buffer__`` attribute in `numpy.frombuffer` was undocumented and
non-functional. This code was removed. If needed, use
``frombuffer(memoryview(obj), ...)`` instead.
-``out``is buffered for memory overlaps in ``np.take``, ``np.choose``, ``np.put``
---------------------------------------------------------------------------------
+``out`` is buffered for memory overlaps in `take`, `choose`, `put`
+------------------------------------------------------------------
If the out argument to these functions is provided and has memory overlap with
the other arguments, it is now buffered to avoid order-dependent behavior.
Unpickling while loading requires explicit opt-in
-------------------------------------------------
-The functions ``np.load``, and ``np.lib.format.read_array`` take an
+The functions `load`, and ``lib.format.read_array`` take an
``allow_pickle`` keyword which now defaults to ``False`` in response to
`CVE-2019-6446 <https://nvd.nist.gov/vuln/detail/CVE-2019-6446>`_.
+
+.. currentmodule:: numpy.random.mtrand
+
Potential changes to the random stream in old random module
-----------------------------------------------------------
-Due to bugs in the application of log to random floating point numbers,
-the stream may change when sampling from ``np.random.beta``, ``np.random.binomial``,
-``np.random.laplace``, ``np.random.logistic``, ``np.random.logseries`` or
-``np.random.multinomial`` if a 0 is generated in the underlying MT19937 random stream.
-There is a 1 in :math:`10^{53}` chance of this occurring, and so the probability that
-the stream changes for any given seed is extremely small. If a 0 is encountered in the
-underlying generator, then the incorrect value produced (either ``np.inf``
-or ``np.nan``) is now dropped.
-
-``i0`` now always returns a result with the same shape as the input
--------------------------------------------------------------------
+Due to bugs in the application of ``log`` to random floating point numbers,
+the stream may change when sampling from `~RandomState.beta`, `~RandomState.binomial`,
+`~RandomState.laplace`, `~RandomState.logistic`, `~RandomState.logseries` or
+`~RandomState.multinomial` if a ``0`` is generated in the underlying `MT19937
+<numpy.random.mt19937.MT19937>` random stream. There is a ``1`` in
+:math:`10^{53}` chance of this occurring, so the probability that the stream
+changes for any given seed is extremely small. If a ``0`` is encountered in the
+underlying generator, then the incorrect value produced (either `numpy.inf` or
+`numpy.nan`) is now dropped.
+
+.. currentmodule:: numpy
+
+`i0` now always returns a result with the same shape as the input
+-----------------------------------------------------------------
Previously, the output was squeezed, such that, e.g., input with just a single
element would lead to an array scalar being returned, and inputs with shapes
such as ``(10, 1)`` would yield results that would not broadcast against the
@@ -179,19 +197,20 @@ input.
Note that we generally recommend the SciPy implementation over the numpy one:
it is a proper ufunc written in C, and more than an order of magnitude faster.
-``np.can_cast`` no longer assumes all unsafe casting is allowed
----------------------------------------------------------------
-Previously, ``can_cast`` returned `True` for almost all inputs for
+`can_cast` no longer assumes all unsafe casting is allowed
+----------------------------------------------------------
+Previously, `can_cast` returned `True` for almost all inputs for
``casting='unsafe'``, even for cases where casting was not possible, such as
from a structured dtype to a regular one. This has been fixed, making it
-more consistent with actual casting using, e.g., the ``.astype`` method.
+more consistent with actual casting using, e.g., the `.astype <ndarray.astype>`
+method.
-``arr.writeable`` can be switched to true slightly more often
--------------------------------------------------------------
+``ndarray.flags.writeable`` can be switched to true slightly more often
+-----------------------------------------------------------------------
In rare cases, it was not possible to switch an array from not writeable
to writeable, although a base array is writeable. This can happen if an
-intermediate ``arr.base`` object is writeable. Previously, only the deepest
+intermediate `ndarray.base` object is writeable. Previously, only the deepest
base object was considered for this decision. However, in rare cases this
object does not have the necessary information. In that case switching to
writeable was never allowed. This has now been fixed.
@@ -214,16 +233,21 @@ This change is backwards compatible, but now allows code like::
New Features
============
-New extensible random module with selectable random number generators
----------------------------------------------------------------------
-A new extensible random module along with four selectable random number
+.. currentmodule:: numpy.random
+
+New extensible `numpy.random` module with selectable random number generators
+-----------------------------------------------------------------------------
+A new extensible `numpy.random` module along with four selectable random number
generators and improved seeding designed for use in parallel processes has been
-added. The currently available bit generators are MT19937, PCG64, Philox, and
-SFC64. PCG64 is the new default while MT19937 is retained for backwards
+added. The currently available :ref:`Bit Generators <bit_generator>` are
+`~mt19937.MT19937`, `~pcg64.PCG64`, `~philox.Philox`, and `~sfc64.SFC64`.
+``PCG64`` is the new default while ``MT19937`` is retained for backwards
compatibility. Note that the legacy random module is unchanged and is now
-frozen, your current results will not change. Extensive documentation for the
-new module is available online at
-`NumPy devdocs <http://www.numpy.org/devdocs/reference/random/index.html>`_.
+frozen, your current results will not change. More information is available in
+the :ref:`API change description <new-or-different>` and in the `top-level view
+<numpy.random>` documentation.
+
+.. currentmodule:: numpy
libFLAME
--------
@@ -233,7 +257,7 @@ implementation, see
User-defined BLAS detection order
---------------------------------
-``numpy.distutils`` now uses an environment variable, comma-separated and case
+`distutils` now uses an environment variable, comma-separated and case
insensitive, to determine the detection order for BLAS libraries.
By default ``NPY_BLAS_ORDER=mkl,blis,openblas,atlas,accelerate,blas``.
However, to force the use of OpenBLAS simply do::
@@ -248,7 +272,7 @@ User-defined LAPACK detection order
-----------------------------------
``numpy.distutils`` now uses an environment variable, comma-separated and case
insensitive, to determine the detection order for LAPACK libraries.
-By default ``NPY_BLAS_ORDER=mkl,openblas,flame,atlas,accelerate,lapack``.
+By default ``NPY_LAPACK_ORDER=mkl,openblas,flame,atlas,accelerate,lapack``.
However, to force the use of OpenBLAS simply do::
NPY_LAPACK_ORDER=openblas python setup.py build
@@ -257,14 +281,14 @@ which forces the use of OpenBLAS.
This may be helpful for users which have a MKL installation but wishes to try
out different implementations.
-``np.ufunc.reduce`` and related functions now accept a ``where`` mask
----------------------------------------------------------------------
-``np.ufunc.reduce``, ``np.sum``, ``np.prod``, ``np.min``, ``np.max`` all
+`ufunc.reduce` and related functions now accept a ``where`` mask
+----------------------------------------------------------------
+`ufunc.reduce`, `sum`, `prod`, `min`, `max` all
now accept a ``where`` keyword argument, which can be used to tell which
elements to include in the reduction. For reductions that do not have an
identity, it is necessary to also pass in an initial value (e.g.,
-``initial=np.inf`` for ``np.min``). For instance, the equivalent of
-``nansum`` would be, ``np.sum(a, where=~np.isnan(a))``.
+``initial=np.inf`` for `min`). For instance, the equivalent of
+`nansum` would be ``np.sum(a, where=~np.isnan(a))``.
Timsort and radix sort have replaced mergesort for stable sorting
-----------------------------------------------------------------
@@ -275,44 +299,50 @@ each other with the actual sort implementation depending on the array type.
Radix sort is used for small integer types of 16 bits or less and timsort for
the remaining types. Timsort features improved performace on data containing
already or nearly sorted data and performs like mergesort on random data and
-requires O(n/2) working space. Details of the timsort algorithm can be found
-at
-`CPython listsort.txt <https://github.com/python/cpython/blob/3.7/Objects/listsort.txt>`_.
+requires :math:`O(n/2)` working space. Details of the timsort algorithm can be
+found at `CPython listsort.txt
+<https://github.com/python/cpython/blob/3.7/Objects/listsort.txt>`_.
+
+`packbits` and `unpackbits` accept an ``order`` keyword
+-------------------------------------------------------
+The ``order`` keyword defaults to ``big``, and will order the **bits**
+accordingly. For ``order='big'`` 3 will become ``[0, 0, 0, 0, 0, 0, 1, 1]``,
+and ``[1, 1, 0, 0, 0, 0, 0, 0]`` for ``order='little'``.
-``np.unpackbits`` now accepts a ``count`` parameter
----------------------------------------------------
+`unpackbits` now accepts a ``count`` parameter
+----------------------------------------------
``count`` allows subsetting the number of bits that will be unpacked up-front,
-rather than reshaping and subsetting later, making the ``packbits`` operation
+rather than reshaping and subsetting later, making the `packbits` operation
invertible, and the unpacking less wasteful. Counts larger than the number of
available bits add zero padding. Negative counts trim bits off the end instead
of counting from the beginning. None counts implement the existing behavior of
unpacking everything.
-``np.linalg.svd`` and ``np.linalg.pinv`` can be faster on hermitian inputs
---------------------------------------------------------------------------
+`linalg.svd` and `linalg.pinv` can be faster on hermitian inputs
+----------------------------------------------------------------
These functions now accept a ``hermitian`` argument, matching the one added
-to ``np.linalg.matrix_rank`` in 1.14.0.
+to `linalg.matrix_rank` in 1.14.0.
divmod operation is now supported for two ``timedelta64`` operands
------------------------------------------------------------------
-The divmod operator now handles two ``np.timedelta64`` operands, with
-type signature mm->qm.
+The divmod operator now handles two ``timedelta64`` operands, with
+type signature ``mm->qm``.
-``np.fromfile`` now takes an ``offset`` argument
-------------------------------------------------
+`fromfile` now takes an ``offset`` argument
+-------------------------------------------
This function now takes an ``offset`` keyword argument for binary files,
which specifics the offset (in bytes) from the file's current position.
-Defaults to 0.
+Defaults to ``0``.
-New mode "empty" for ``np.pad``
--------------------------------
+New mode "empty" for `pad`
+--------------------------
This mode pads an array to a desired shape without initializing the new
entries.
-``np.empty_like`` and related functions now accept a ``shape`` argument
------------------------------------------------------------------------
-``np.empty_like``, ``np.full_like``, ``np.ones_like`` and ``np.zeros_like`` now
-accept a ``shape`` keyword argument, which can be used to create a new array
+`empty_like` and related functions now accept a ``shape`` argument
+------------------------------------------------------------------
+`empty_like`, `full_like`, `ones_like` and `zeros_like` now accept a ``shape``
+keyword argument, which can be used to create a new array
as the prototype, overriding its shape as well. This is particularly useful
when combined with the ``__array_function__`` protocol, allowing the creation
of new arbitrary-shape arrays from NumPy-like libraries when such an array
@@ -340,12 +370,6 @@ equivalent.
A new format version of 3.0 has been introduced, which enables structured types
with non-latin1 field names. This is used automatically when needed.
-`numpy.packbits` and `numpy.unpackbits` accept an ``order`` keyword
--------------------------------------------------------------------
-The ``order`` keyword defaults to ``big``, and will order the **bits**
-accordingly. For ``'big'`` 3 will become ``[0, 0, 0, 0, 0, 0, 1, 1]``, and
-``[1, 1, 0, 0, 0, 0, 0, 0]`` for ``little``
-
Improvements
============
@@ -353,25 +377,25 @@ Improvements
Array comparison assertions include maximum differences
-------------------------------------------------------
Error messages from array comparison tests such as
-`np.testing.assert_allclose` now include "max absolute difference" and
+`testing.assert_allclose` now include "max absolute difference" and
"max relative difference," in addition to the previous "mismatch" percentage.
This information makes it easier to update absolute and relative error
tolerances.
-Replacement of the fftpack based FFT module by the pocketfft library
---------------------------------------------------------------------
+Replacement of the fftpack based `fft` module by the pocketfft library
+----------------------------------------------------------------------
Both implementations have the same ancestor (Fortran77 FFTPACK by Paul N.
Swarztrauber), but pocketfft contains additional modifications which improve
both accuracy and performance in some circumstances. For FFT lengths containing
large prime factors, pocketfft uses Bluestein's algorithm, which maintains
-``O(N log N)`` run time complexity instead of deteriorating towards ``O(N*N)``
-for prime lengths. Also, accuracy for real valued FFTs with near prime lengths
-has improved and is on par with complex valued FFTs.
+:math:`O(N \log N)` run time complexity instead of deteriorating towards
+:math:`O(N^2)` for prime lengths. Also, accuracy for real valued FFTs with near
+prime lengths has improved and is on par with complex valued FFTs.
Further improvements to ``ctypes`` support in `numpy.ctypeslib`
---------------------------------------------------------------
A new `numpy.ctypeslib.as_ctypes_type` function has been added, which can be
-used to converts a ``dtype`` into a best-guess ``ctypes`` type. Thanks to this
+used to convert a `dtype` into a best-guess `ctypes` type. Thanks to this
new function, `numpy.ctypeslib.as_ctypes` now supports a much wider range of
array types, including structures, booleans, and integers of non-native
endianness.
@@ -383,7 +407,7 @@ Currently, if you have a function like::
def foo():
pass
-and you want to wrap the whole thing in ``errstate``, you have to rewrite it
+and you want to wrap the whole thing in `errstate`, you have to rewrite it
like so::
def foo():
@@ -400,9 +424,9 @@ thereby saving a level of indentation
`numpy.exp` and `numpy.log` speed up for float32 implementation
---------------------------------------------------------------
-float32 implementation of numpy.exp and numpy.log now benefit from AVX2/AVX512
-instruction set which are detected during runtime. numpy.exp has a max ulp
-error of 2.52 and numpy.log has a max ulp error or 3.83.
+float32 implementation of `exp` and `log` now benefit from AVX2/AVX512
+instruction set which are detected during runtime. `exp` has a max ulp
+error of 2.52 and `log` has a max ulp error of 3.83.
Improve performance of `numpy.pad`
----------------------------------
@@ -412,28 +436,28 @@ concatenation.
`numpy.interp` handles infinities more robustly
-----------------------------------------------
-In some cases where ``np.interp`` would previously return ``np.nan``, it now
+In some cases where `interp` would previously return `nan`, it now
returns an appropriate infinity.
-Pathlib support for ``np.fromfile``, ``ndarray.tofile`` and ``ndarray.dump``
-----------------------------------------------------------------------------
-``np.fromfile``, ``np.ndarray.tofile`` and ``np.ndarray.dump`` now support
+Pathlib support for `fromfile`, `tofile` and `ndarray.dump`
+-----------------------------------------------------------
+`fromfile`, `ndarray.tofile` and `ndarray.dump` now support
the `pathlib.Path` type for the ``file``/``fid`` parameter.
-Specialized ``np.isnan``, ``np.isinf``, and ``np.isfinite`` ufuncs for bool and int types
------------------------------------------------------------------------------------------
-The boolean and integer types are incapable of storing ``np.nan`` and
-``np.inf`` values, which allows us to provide specialized ufuncs that are up to
-250x faster than the current approach.
+Specialized `isnan`, `isinf`, and `isfinite` ufuncs for bool and int types
+--------------------------------------------------------------------------
+The boolean and integer types are incapable of storing `nan` and `inf` values,
+which allows us to provide specialized ufuncs that are up to 250x faster than
+the previous approach.
-``np.isfinite`` supports ``datetime64`` and ``timedelta64`` types
+`isfinite` supports ``datetime64`` and ``timedelta64`` types
-----------------------------------------------------------------
-Previously, `np.isfinite` used to raise a ``TypeError`` on being used on these
+Previously, `isfinite` used to raise a `TypeError` on being used on these
two types.
-New keywords added to ``np.nan_to_num``
----------------------------------------
-``np.nan_to_num`` now accepts keywords ``nan``, ``posinf`` and ``neginf``
+New keywords added to `nan_to_num`
+----------------------------------
+`nan_to_num` now accepts keywords ``nan``, ``posinf`` and ``neginf``
allowing the user to define the value to replace the ``nan``, positive and
negative ``np.inf`` values respectively.
@@ -449,14 +473,14 @@ These ufuncs now call the ``__floor__``, ``__ceil__``, and ``__trunc__``
methods when called on object arrays, making them compatible with
`decimal.Decimal` and `fractions.Fraction` objects.
-``quantile`` now works on ``fraction.Fraction`` and ``decimal.Decimal`` objects
--------------------------------------------------------------------------------
+`quantile` now works on `fraction.Fraction` and `decimal.Decimal` objects
+-------------------------------------------------------------------------
In general, this handles object arrays more gracefully, and avoids floating-
point operations if exact arithmetic types are used.
-Support of object arrays in ``np.matmul``
------------------------------------------
-It is now possible to use ``np.matmul`` (or the ``@`` operator) with object arrays.
+Support of object arrays in `matmul`
+------------------------------------
+It is now possible to use `matmul` (or the ``@`` operator) with object arrays.
For instance, it is now possible to do::
from fractions import Fraction
@@ -467,10 +491,10 @@ For instance, it is now possible to do::
Changes
=======
-``median`` and ``percentile`` family of functions no longer warn about ``nan``
-------------------------------------------------------------------------------
+`median` and `percentile` family of functions no longer warn about ``nan``
+--------------------------------------------------------------------------
`numpy.median`, `numpy.percentile`, and `numpy.quantile` used to emit a
-``RuntimeWarning`` when encountering an `numpy.nan`. Since they return the
+``RuntimeWarning`` when encountering a `nan`. Since they return the
``nan`` value, the warning is redundant and has been removed.
``timedelta64 % 0`` behavior adjusted to return ``NaT``
@@ -487,16 +511,16 @@ are set, but is now always enabled.
.. _`NEP 18` : http://www.numpy.org/neps/nep-0018-array-function-protocol.html
-`numpy.lib.recfunctions.structured_to_unstructured` does not squeeze single-field views
----------------------------------------------------------------------------------------
+``lib.recfunctions.structured_to_unstructured`` does not squeeze single-field views
+-----------------------------------------------------------------------------------
Previously ``structured_to_unstructured(arr[['a']])`` would produce a squeezed
result inconsistent with ``structured_to_unstructured(arr[['a', b']])``. This
was accidental. The old behavior can be retained with
``structured_to_unstructured(arr[['a']]).squeeze(axis=-1)`` or far more simply,
``arr['a']``.
-``clip`` now uses a ufunc under the hood
-----------------------------------------
+`clip` now uses a ufunc under the hood
+--------------------------------------
This means that registering clip functions for custom dtypes in C via
``descr->f->fastclip`` is deprecated - they should use the ufunc registration
mechanism instead, attaching to the ``np.core.umath.clip`` ufunc.
@@ -524,9 +548,9 @@ Additionally, there are some corner cases with behavior changes:
------------------------------------------------------
The interface may use an ``offset`` value that was mistakenly ignored.
-Pickle protocol in ``np.savez`` set to 3 for ``force zip64`` flag
+Pickle protocol in `savez` set to 3 for ``force zip64`` flag
-----------------------------------------------------------------
-``np.savez`` was not using the ``force_zip64`` flag, which limited the size of
+`savez` was not using the ``force_zip64`` flag, which limited the size of
the archive to 2GB. But using the flag requires us to use pickle protocol 3 to
write ``object`` arrays. The protocol used was bumped to 3, meaning the archive
will be unreadable by Python2.
@@ -536,4 +560,3 @@ Structured arrays indexed with non-existent fields raise ``KeyError`` not ``Valu
``arr['bad_field']`` on a structured type raises ``KeyError``, for consistency
with ``dict['bad_field']``.
-.. _`NEP 18` : http://www.numpy.org/neps/nep-0018-array-function-protocol.html
diff --git a/doc/source/release/1.17.1-notes.rst b/doc/source/release/1.17.1-notes.rst
new file mode 100644
index 000000000..bd837ee5b
--- /dev/null
+++ b/doc/source/release/1.17.1-notes.rst
@@ -0,0 +1,73 @@
+.. currentmodule:: numpy
+
+==========================
+NumPy 1.17.1 Release Notes
+==========================
+
+This release contains a number of fixes for bugs reported against NumPy 1.17.0
+along with a few documentation and build improvements. The Python versions
+supported are 3.5-3.7, note that Python 2.7 has been dropped. Python 3.8b3
+should work with the released source packages, but there are no future
+guarantees.
+
+Downstream developers should use Cython >= 0.29.13 for Python 3.8 support and
+OpenBLAS >= 3.7 to avoid problems on the Skylake architecture. The NumPy wheels
+on PyPI are built from the OpenBLAS development branch in order to avoid those
+problems.
+
+
+Contributors
+============
+
+A total of 17 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Alexander Jung +
+* Allan Haldane
+* Charles Harris
+* Eric Wieser
+* Giuseppe Cuccu +
+* Hiroyuki V. Yamazaki
+* Jérémie du Boisberranger
+* Kmol Yuan +
+* Matti Picus
+* Max Bolingbroke +
+* Maxwell Aladago +
+* Oleksandr Pavlyk
+* Peter Andreas Entschev
+* Sergei Lebedev
+* Seth Troisi +
+* Vladimir Pershin +
+* Warren Weckesser
+
+
+Pull requests merged
+====================
+
+A total of 24 pull requests were merged for this release.
+
+* `#14156 <https://github.com/numpy/numpy/pull/14156>`__: TST: Allow fuss in testing strided/non-strided exp/log loops
+* `#14157 <https://github.com/numpy/numpy/pull/14157>`__: BUG: avx2_scalef_ps must be static
+* `#14158 <https://github.com/numpy/numpy/pull/14158>`__: BUG: Remove stray print that causes a SystemError on python 3.7.
+* `#14159 <https://github.com/numpy/numpy/pull/14159>`__: BUG: Fix DeprecationWarning in python 3.8.
+* `#14160 <https://github.com/numpy/numpy/pull/14160>`__: BLD: Add missing gcd/lcm definitions to npy_math.h
+* `#14161 <https://github.com/numpy/numpy/pull/14161>`__: DOC, BUILD: cleanups and fix (again) 'build dist'
+* `#14166 <https://github.com/numpy/numpy/pull/14166>`__: TST: Add 3.8-dev to travisCI testing.
+* `#14194 <https://github.com/numpy/numpy/pull/14194>`__: BUG: Remove the broken clip wrapper (Backport)
+* `#14198 <https://github.com/numpy/numpy/pull/14198>`__: DOC: Fix hermitian argument docs in svd.
+* `#14199 <https://github.com/numpy/numpy/pull/14199>`__: MAINT: Workaround for Intel compiler bug leading to failing test
+* `#14200 <https://github.com/numpy/numpy/pull/14200>`__: TST: Clean up of test_pocketfft.py
+* `#14201 <https://github.com/numpy/numpy/pull/14201>`__: BUG: Make advanced indexing result on read-only subclass writeable...
+* `#14236 <https://github.com/numpy/numpy/pull/14236>`__: BUG: Fixed default BitGenerator name
+* `#14237 <https://github.com/numpy/numpy/pull/14237>`__: ENH: add c-imported modules for freeze analysis in np.random
+* `#14296 <https://github.com/numpy/numpy/pull/14296>`__: TST: Pin pytest version to 5.0.1
+* `#14301 <https://github.com/numpy/numpy/pull/14301>`__: BUG: Fix leak in the f2py-generated module init and `PyMem_Del`...
+* `#14302 <https://github.com/numpy/numpy/pull/14302>`__: BUG: Fix formatting error in exception message
+* `#14307 <https://github.com/numpy/numpy/pull/14307>`__: MAINT: random: Match type of SeedSequence.pool_size to DEFAULT_POOL_SIZE.
+* `#14308 <https://github.com/numpy/numpy/pull/14308>`__: BUG: Fix numpy.random bug in platform detection
+* `#14309 <https://github.com/numpy/numpy/pull/14309>`__: ENH: Enable huge pages in all Linux builds
+* `#14330 <https://github.com/numpy/numpy/pull/14330>`__: BUG: Fix segfault in `random.permutation(x)` when x is a string.
+* `#14338 <https://github.com/numpy/numpy/pull/14338>`__: BUG: don't fail when lexsorting some empty arrays (#14228)
+* `#14339 <https://github.com/numpy/numpy/pull/14339>`__: BUG: Fix misuse of .names and .fields in various places (backport...
+* `#14345 <https://github.com/numpy/numpy/pull/14345>`__: BUG: fix behavior of structured_to_unstructured on non-trivial...
+* `#14350 <https://github.com/numpy/numpy/pull/14350>`__: REL: Prepare 1.17.1 release
diff --git a/doc/source/release/1.17.2-notes.rst b/doc/source/release/1.17.2-notes.rst
new file mode 100644
index 000000000..65cdaf903
--- /dev/null
+++ b/doc/source/release/1.17.2-notes.rst
@@ -0,0 +1,49 @@
+.. currentmodule:: numpy
+
+==========================
+NumPy 1.17.2 Release Notes
+==========================
+
+This release contains fixes for bugs reported against NumPy 1.17.1 along with
+some documentation improvements. The most important fix is for lexsort when
+the keys are of type (u)int8 or (u)int16. If you are currently using 1.17 you
+should upgrade.
+
+The Python versions supported in this release are 3.5-3.7; Python 2.7 has been
+dropped. Python 3.8b4 should work with the released source packages, but there
+are no future guarantees.
+
+Downstream developers should use Cython >= 0.29.13 for Python 3.8 support and
+OpenBLAS >= 3.7 to avoid errors on the Skylake architecture. The NumPy wheels
+on PyPI are built from the OpenBLAS development branch in order to avoid those
+errors.
+
+
+Contributors
+============
+
+A total of 7 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* CakeWithSteak +
+* Charles Harris
+* Dan Allan
+* Hameer Abbasi
+* Lars Grueter
+* Matti Picus
+* Sebastian Berg
+
+
+Pull requests merged
+====================
+
+A total of 8 pull requests were merged for this release.
+
+* `#14418 <https://github.com/numpy/numpy/pull/14418>`__: BUG: Fix aradixsort indirect indexing.
+* `#14420 <https://github.com/numpy/numpy/pull/14420>`__: DOC: Fix a minor typo in dispatch documentation.
+* `#14421 <https://github.com/numpy/numpy/pull/14421>`__: BUG: test, fix regression in converting to ctypes
+* `#14430 <https://github.com/numpy/numpy/pull/14430>`__: BUG: Do not show Override module in private error classes.
+* `#14432 <https://github.com/numpy/numpy/pull/14432>`__: BUG: Fixed maximum relative error reporting in assert_allclose.
+* `#14433 <https://github.com/numpy/numpy/pull/14433>`__: BUG: Fix uint-overflow if padding with linear_ramp and negative...
+* `#14436 <https://github.com/numpy/numpy/pull/14436>`__: BUG: Update 1.17.x with 1.18.0-dev pocketfft.py.
+* `#14446 <https://github.com/numpy/numpy/pull/14446>`__: REL: Prepare for NumPy 1.17.2 release.
diff --git a/doc/source/release/1.18.0-notes.rst b/doc/source/release/1.18.0-notes.rst
new file mode 100644
index 000000000..e66540410
--- /dev/null
+++ b/doc/source/release/1.18.0-notes.rst
@@ -0,0 +1,8 @@
+The NumPy 1.18 release is currently in development. Please check
+the ``numpy/doc/release/upcoming_changes/`` folder for upcoming
+release notes.
+The ``numpy/doc/release/upcoming_changes/README.txt`` details how
+to add new release notes.
+
+For the work in progress release notes for the current development
+version, see the `devdocs <https://numpy.org/devdocs/release.html>`__.
diff --git a/doc/release/1.3.0-notes.rst b/doc/source/release/1.3.0-notes.rst
index 239714246..239714246 100644
--- a/doc/release/1.3.0-notes.rst
+++ b/doc/source/release/1.3.0-notes.rst
diff --git a/doc/release/1.4.0-notes.rst b/doc/source/release/1.4.0-notes.rst
index 9480a054e..9480a054e 100644
--- a/doc/release/1.4.0-notes.rst
+++ b/doc/source/release/1.4.0-notes.rst
diff --git a/doc/release/1.5.0-notes.rst b/doc/source/release/1.5.0-notes.rst
index a2184ab13..a2184ab13 100644
--- a/doc/release/1.5.0-notes.rst
+++ b/doc/source/release/1.5.0-notes.rst
diff --git a/doc/release/1.6.0-notes.rst b/doc/source/release/1.6.0-notes.rst
index c5f53a0eb..c5f53a0eb 100644
--- a/doc/release/1.6.0-notes.rst
+++ b/doc/source/release/1.6.0-notes.rst
diff --git a/doc/release/1.6.1-notes.rst b/doc/source/release/1.6.1-notes.rst
index 05fcb4ab9..05fcb4ab9 100644
--- a/doc/release/1.6.1-notes.rst
+++ b/doc/source/release/1.6.1-notes.rst
diff --git a/doc/release/1.6.2-notes.rst b/doc/source/release/1.6.2-notes.rst
index 8f0b06f98..8f0b06f98 100644
--- a/doc/release/1.6.2-notes.rst
+++ b/doc/source/release/1.6.2-notes.rst
diff --git a/doc/release/1.7.0-notes.rst b/doc/source/release/1.7.0-notes.rst
index f111f80dc..f111f80dc 100644
--- a/doc/release/1.7.0-notes.rst
+++ b/doc/source/release/1.7.0-notes.rst
diff --git a/doc/release/1.7.1-notes.rst b/doc/source/release/1.7.1-notes.rst
index 04216b0df..04216b0df 100644
--- a/doc/release/1.7.1-notes.rst
+++ b/doc/source/release/1.7.1-notes.rst
diff --git a/doc/release/1.7.2-notes.rst b/doc/source/release/1.7.2-notes.rst
index b0951bd72..b0951bd72 100644
--- a/doc/release/1.7.2-notes.rst
+++ b/doc/source/release/1.7.2-notes.rst
diff --git a/doc/release/1.8.0-notes.rst b/doc/source/release/1.8.0-notes.rst
index 80c39f8bc..80c39f8bc 100644
--- a/doc/release/1.8.0-notes.rst
+++ b/doc/source/release/1.8.0-notes.rst
diff --git a/doc/release/1.8.1-notes.rst b/doc/source/release/1.8.1-notes.rst
index ea34e75ac..ea34e75ac 100644
--- a/doc/release/1.8.1-notes.rst
+++ b/doc/source/release/1.8.1-notes.rst
diff --git a/doc/release/1.8.2-notes.rst b/doc/source/release/1.8.2-notes.rst
index 71e549526..71e549526 100644
--- a/doc/release/1.8.2-notes.rst
+++ b/doc/source/release/1.8.2-notes.rst
diff --git a/doc/release/1.9.0-notes.rst b/doc/source/release/1.9.0-notes.rst
index 7ea29e354..7ea29e354 100644
--- a/doc/release/1.9.0-notes.rst
+++ b/doc/source/release/1.9.0-notes.rst
diff --git a/doc/release/1.9.1-notes.rst b/doc/source/release/1.9.1-notes.rst
index 4558237f4..4558237f4 100644
--- a/doc/release/1.9.1-notes.rst
+++ b/doc/source/release/1.9.1-notes.rst
diff --git a/doc/release/1.9.2-notes.rst b/doc/source/release/1.9.2-notes.rst
index 268f3aa64..268f3aa64 100644
--- a/doc/release/1.9.2-notes.rst
+++ b/doc/source/release/1.9.2-notes.rst
diff --git a/doc/release/template.rst b/doc/source/release/template.rst
index fdfec2be9..cde7646df 100644
--- a/doc/release/template.rst
+++ b/doc/source/release/template.rst
@@ -1,3 +1,5 @@
+:orphan:
+
==========================
NumPy 1.xx.x Release Notes
==========================
diff --git a/doc/source/user/basics.io.genfromtxt.rst b/doc/source/user/basics.io.genfromtxt.rst
index 6ef80bf8e..19e37eabc 100644
--- a/doc/source/user/basics.io.genfromtxt.rst
+++ b/doc/source/user/basics.io.genfromtxt.rst
@@ -27,13 +27,13 @@ Defining the input
==================
The only mandatory argument of :func:`~numpy.genfromtxt` is the source of
-the data. It can be a string, a list of strings, or a generator. If a
-single string is provided, it is assumed to be the name of a local or
-remote file, or an open file-like object with a :meth:`read` method, for
-example, a file or :class:`io.StringIO` object. If a list of strings
-or a generator returning strings is provided, each string is treated as one
-line in a file. When the URL of a remote file is passed, the file is
-automatically downloaded to the current directory and opened.
+the data. It can be a string, a list of strings, a generator or an open
+file-like object with a :meth:`read` method, for example, a file or
+:class:`io.StringIO` object. If a single string is provided, it is assumed
+to be the name of a local or remote file. If a list of strings or a generator
+returning strings is provided, each string is treated as one line in a file.
+When the URL of a remote file is passed, the file is automatically downloaded
+to the current directory and opened.
Recognized file types are text files and archives. Currently, the function
recognizes :class:`gzip` and :class:`bz2` (`bzip2`) archives. The type of
diff --git a/doc/source/user/building.rst b/doc/source/user/building.rst
index a13e1160a..b4b4371e5 100644
--- a/doc/source/user/building.rst
+++ b/doc/source/user/building.rst
@@ -56,7 +56,7 @@ Basic Installation
To install NumPy run::
- python setup.py install
+ pip install .
To perform an in-place build that can be run from the source folder run::
@@ -69,6 +69,15 @@ Using ``virtualenv`` should work as expected.
*Note: for build instructions to do development work on NumPy itself, see*
:ref:`development-environment`.
+Testing
+-------
+
+Make sure to test your builds. To ensure everything stays in shape, see if all tests pass::
+
+ $ python runtests.py -v -m full
+
+For detailed info on testing, see :ref:`testing-builds`.
+
.. _parallel-builds:
Parallel builds
diff --git a/doc/source/user/c-info.beyond-basics.rst b/doc/source/user/c-info.beyond-basics.rst
index d4d941a5e..dd25861b4 100644
--- a/doc/source/user/c-info.beyond-basics.rst
+++ b/doc/source/user/c-info.beyond-basics.rst
@@ -300,9 +300,10 @@ An example castfunc is:
static void
double_to_float(double *from, float* to, npy_intp n,
- void* ig1, void* ig2);
- while (n--) {
- (*to++) = (double) *(from++);
+ void* ignore1, void* ignore2) {
+ while (n--) {
+ (*to++) = (float) *(from++);
+ }
}
This could then be registered to convert doubles to floats using the
diff --git a/doc/source/user/c-info.how-to-extend.rst b/doc/source/user/c-info.how-to-extend.rst
index 3961325fb..00ef8ab74 100644
--- a/doc/source/user/c-info.how-to-extend.rst
+++ b/doc/source/user/c-info.how-to-extend.rst
@@ -342,7 +342,7 @@ The method is to
4. If you are writing the algorithm, then I recommend that you use the
stride information contained in the array to access the elements of
- the array (the :c:func:`PyArray_GETPTR` macros make this painless). Then,
+ the array (the :c:func:`PyArray_GetPtr` macros make this painless). Then,
you can relax your requirements so as not to force a single-segment
array and the data-copying that might result.
@@ -463,7 +463,7 @@ writeable). The syntax is
This flag is useful to specify an array that will be used for both
input and output. :c:func:`PyArray_ResolveWritebackIfCopy`
- must be called before :func:`Py_DECREF` at
+ must be called before :c:func:`Py_DECREF` at
the end of the interface routine to write back the temporary data
into the original array passed in. Use
of the :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` or
@@ -530,7 +530,7 @@ specific element of the array is determined only by the array of
npy_intp variables, :c:func:`PyArray_STRIDES` (obj). In particular, this
c-array of integers shows how many **bytes** must be added to the
current element pointer to get to the next element in each dimension.
-For arrays less than 4-dimensions there are :c:func:`PyArray_GETPTR{k}`
+For arrays less than 4-dimensions there are ``PyArray_GETPTR{k}``
(obj, ...) macros where {k} is the integer 1, 2, 3, or 4 that make
using the array strides easier. The arguments .... represent {k} non-
negative integer indices into the array. For example, suppose ``E`` is
@@ -543,7 +543,7 @@ contiguous arrays have particular striding patterns. Two array flags
whether or not the striding pattern of a particular array matches the
C-style contiguous or Fortran-style contiguous or neither. Whether or
not the striding pattern matches a standard C or Fortran one can be
-tested Using :c:func:`PyArray_ISCONTIGUOUS` (obj) and
+tested Using :c:func:`PyArray_IS_C_CONTIGUOUS` (obj) and
:c:func:`PyArray_ISFORTRAN` (obj) respectively. Most third-party
libraries expect contiguous arrays. But, often it is not difficult to
support general-purpose striding. I encourage you to use the striding
diff --git a/doc/source/user/c-info.python-as-glue.rst b/doc/source/user/c-info.python-as-glue.rst
index 01d2a64d1..7b9b096af 100644
--- a/doc/source/user/c-info.python-as-glue.rst
+++ b/doc/source/user/c-info.python-as-glue.rst
@@ -387,7 +387,7 @@ distribution of the ``add.f`` module (as part of the package
Installation of the new package is easy using::
- python setup.py install
+ pip install .
assuming you have the proper permissions to write to the main site-
packages directory for the version of Python you are using. For the
@@ -744,14 +744,14 @@ around this restriction that allow ctypes to integrate with other
objects.
1. Don't set the argtypes attribute of the function object and define an
- :obj:`_as_parameter_` method for the object you want to pass in. The
- :obj:`_as_parameter_` method must return a Python int which will be passed
+ ``_as_parameter_`` method for the object you want to pass in. The
+ ``_as_parameter_`` method must return a Python int which will be passed
directly to the function.
2. Set the argtypes attribute to a list whose entries contain objects
with a classmethod named from_param that knows how to convert your
object to an object that ctypes can understand (an int/long, string,
- unicode, or object with the :obj:`_as_parameter_` attribute).
+ unicode, or object with the ``_as_parameter_`` attribute).
NumPy uses both methods with a preference for the second method
because it can be safer. The ctypes attribute of the ndarray returns
@@ -764,7 +764,7 @@ correct type, shape, and has the correct flags set or risk nasty
crashes if the data-pointer to inappropriate arrays are passed in.
To implement the second method, NumPy provides the class-factory
-function :func:`ndpointer` in the :mod:`ctypeslib` module. This
+function :func:`ndpointer` in the :mod:`numpy.ctypeslib` module. This
class-factory function produces an appropriate class that can be
placed in an argtypes attribute entry of a ctypes function. The class
will contain a from_param method which ctypes will use to convert any
diff --git a/doc/source/user/quickstart.rst b/doc/source/user/quickstart.rst
index c8d964599..a23a7b2c7 100644
--- a/doc/source/user/quickstart.rst
+++ b/doc/source/user/quickstart.rst
@@ -206,8 +206,8 @@ of elements that we want, instead of the step::
`empty_like`,
`arange`,
`linspace`,
- `numpy.random.rand`,
- `numpy.random.randn`,
+ `numpy.random.mtrand.RandomState.rand`,
+ `numpy.random.mtrand.RandomState.randn`,
`fromfunction`,
`fromfile`
@@ -732,9 +732,9 @@ stacks 1D arrays as columns into a 2D array. It is equivalent to
array([[ 4., 3.],
[ 2., 8.]])
-On the other hand, the function `row_stack` is equivalent to `vstack`
+On the other hand, the function `ma.row_stack` is equivalent to `vstack`
for any input arrays.
-In general, for arrays of with more than two dimensions,
+In general, for arrays with more than two dimensions,
`hstack` stacks along their second
axes, `vstack` stacks along their
first axes, and `concatenate`
diff --git a/numpy/__init__.pxd b/numpy/__init__.pxd
new file mode 100644
index 000000000..23bd22e36
--- /dev/null
+++ b/numpy/__init__.pxd
@@ -0,0 +1,978 @@
+# NumPy static imports for Cython
+#
+# If any of the PyArray_* functions are called, import_array must be
+# called first.
+#
+# This also defines backwards-compatibility buffer acquisition
+# code for use in Python 2.x (or Python <= 2.5 when NumPy starts
+# implementing PEP-3118 directly).
+#
+# Because of laziness, the format string of the buffer is statically
+# allocated. Increase the size if this is not enough, or submit a
+# patch to do this properly.
+#
+# Author: Dag Sverre Seljebotn
+#
+
+DEF _buffer_format_string_len = 255
+
+cimport cpython.buffer as pybuf
+from cpython.ref cimport Py_INCREF
+from cpython.mem cimport PyObject_Malloc, PyObject_Free
+from cpython.object cimport PyObject, PyTypeObject
+from cpython.buffer cimport PyObject_GetBuffer
+from cpython.type cimport type
+cimport libc.stdio as stdio
+
+cdef extern from "Python.h":
+ ctypedef int Py_intptr_t
+
+cdef extern from "numpy/arrayobject.h":
+ ctypedef Py_intptr_t npy_intp
+ ctypedef size_t npy_uintp
+
+ cdef enum NPY_TYPES:
+ NPY_BOOL
+ NPY_BYTE
+ NPY_UBYTE
+ NPY_SHORT
+ NPY_USHORT
+ NPY_INT
+ NPY_UINT
+ NPY_LONG
+ NPY_ULONG
+ NPY_LONGLONG
+ NPY_ULONGLONG
+ NPY_FLOAT
+ NPY_DOUBLE
+ NPY_LONGDOUBLE
+ NPY_CFLOAT
+ NPY_CDOUBLE
+ NPY_CLONGDOUBLE
+ NPY_OBJECT
+ NPY_STRING
+ NPY_UNICODE
+ NPY_VOID
+ NPY_DATETIME
+ NPY_TIMEDELTA
+ NPY_NTYPES
+ NPY_NOTYPE
+
+ NPY_INT8
+ NPY_INT16
+ NPY_INT32
+ NPY_INT64
+ NPY_INT128
+ NPY_INT256
+ NPY_UINT8
+ NPY_UINT16
+ NPY_UINT32
+ NPY_UINT64
+ NPY_UINT128
+ NPY_UINT256
+ NPY_FLOAT16
+ NPY_FLOAT32
+ NPY_FLOAT64
+ NPY_FLOAT80
+ NPY_FLOAT96
+ NPY_FLOAT128
+ NPY_FLOAT256
+ NPY_COMPLEX32
+ NPY_COMPLEX64
+ NPY_COMPLEX128
+ NPY_COMPLEX160
+ NPY_COMPLEX192
+ NPY_COMPLEX256
+ NPY_COMPLEX512
+
+ NPY_INTP
+
+ ctypedef enum NPY_ORDER:
+ NPY_ANYORDER
+ NPY_CORDER
+ NPY_FORTRANORDER
+ NPY_KEEPORDER
+
+ ctypedef enum NPY_CASTING:
+ NPY_NO_CASTING
+ NPY_EQUIV_CASTING
+ NPY_SAFE_CASTING
+ NPY_SAME_KIND_CASTING
+ NPY_UNSAFE_CASTING
+
+ ctypedef enum NPY_CLIPMODE:
+ NPY_CLIP
+ NPY_WRAP
+ NPY_RAISE
+
+ ctypedef enum NPY_SCALARKIND:
+ NPY_NOSCALAR,
+ NPY_BOOL_SCALAR,
+ NPY_INTPOS_SCALAR,
+ NPY_INTNEG_SCALAR,
+ NPY_FLOAT_SCALAR,
+ NPY_COMPLEX_SCALAR,
+ NPY_OBJECT_SCALAR
+
+ ctypedef enum NPY_SORTKIND:
+ NPY_QUICKSORT
+ NPY_HEAPSORT
+ NPY_MERGESORT
+
+ ctypedef enum NPY_SEARCHSIDE:
+ NPY_SEARCHLEFT
+ NPY_SEARCHRIGHT
+
+ enum:
+ # DEPRECATED since NumPy 1.7 ! Do not use in new code!
+ NPY_C_CONTIGUOUS
+ NPY_F_CONTIGUOUS
+ NPY_CONTIGUOUS
+ NPY_FORTRAN
+ NPY_OWNDATA
+ NPY_FORCECAST
+ NPY_ENSURECOPY
+ NPY_ENSUREARRAY
+ NPY_ELEMENTSTRIDES
+ NPY_ALIGNED
+ NPY_NOTSWAPPED
+ NPY_WRITEABLE
+ NPY_UPDATEIFCOPY
+ NPY_ARR_HAS_DESCR
+
+ NPY_BEHAVED
+ NPY_BEHAVED_NS
+ NPY_CARRAY
+ NPY_CARRAY_RO
+ NPY_FARRAY
+ NPY_FARRAY_RO
+ NPY_DEFAULT
+
+ NPY_IN_ARRAY
+ NPY_OUT_ARRAY
+ NPY_INOUT_ARRAY
+ NPY_IN_FARRAY
+ NPY_OUT_FARRAY
+ NPY_INOUT_FARRAY
+
+ NPY_UPDATE_ALL
+
+ enum:
+ # Added in NumPy 1.7 to replace the deprecated enums above.
+ NPY_ARRAY_C_CONTIGUOUS
+ NPY_ARRAY_F_CONTIGUOUS
+ NPY_ARRAY_OWNDATA
+ NPY_ARRAY_FORCECAST
+ NPY_ARRAY_ENSURECOPY
+ NPY_ARRAY_ENSUREARRAY
+ NPY_ARRAY_ELEMENTSTRIDES
+ NPY_ARRAY_ALIGNED
+ NPY_ARRAY_NOTSWAPPED
+ NPY_ARRAY_WRITEABLE
+ NPY_ARRAY_UPDATEIFCOPY
+
+ NPY_ARRAY_BEHAVED
+ NPY_ARRAY_BEHAVED_NS
+ NPY_ARRAY_CARRAY
+ NPY_ARRAY_CARRAY_RO
+ NPY_ARRAY_FARRAY
+ NPY_ARRAY_FARRAY_RO
+ NPY_ARRAY_DEFAULT
+
+ NPY_ARRAY_IN_ARRAY
+ NPY_ARRAY_OUT_ARRAY
+ NPY_ARRAY_INOUT_ARRAY
+ NPY_ARRAY_IN_FARRAY
+ NPY_ARRAY_OUT_FARRAY
+ NPY_ARRAY_INOUT_FARRAY
+
+ NPY_ARRAY_UPDATE_ALL
+
+ cdef enum:
+ NPY_MAXDIMS
+
+ npy_intp NPY_MAX_ELSIZE
+
+ ctypedef void (*PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, void *)
+
+ ctypedef struct PyArray_ArrayDescr:
+ # shape is a tuple, but Cython doesn't support "tuple shape"
+ # inside a non-PyObject declaration, so we have to declare it
+ # as just a PyObject*.
+ PyObject* shape
+
+ ctypedef struct PyArray_Descr:
+ pass
+
+ ctypedef class numpy.dtype [object PyArray_Descr, check_size ignore]:
+ # Use PyDataType_* macros when possible, however there are no macros
+ # for accessing some of the fields, so some are defined.
+ cdef PyTypeObject* typeobj
+ cdef char kind
+ cdef char type
+ # Numpy sometimes mutates this without warning (e.g. it'll
+ # sometimes change "|" to "<" in shared dtype objects on
+ # little-endian machines). If this matters to you, use
+ # PyArray_IsNativeByteOrder(dtype.byteorder) instead of
+ # directly accessing this field.
+ cdef char byteorder
+ cdef char flags
+ cdef int type_num
+ cdef int itemsize "elsize"
+ cdef int alignment
+ cdef dict fields
+ cdef tuple names
+ # Use PyDataType_HASSUBARRAY to test whether this field is
+ # valid (the pointer can be NULL). Most users should access
+ # this field via the inline helper method PyDataType_SHAPE.
+ cdef PyArray_ArrayDescr* subarray
+
+ ctypedef extern class numpy.flatiter [object PyArrayIterObject, check_size ignore]:
+ # Use through macros
+ pass
+
+ ctypedef extern class numpy.broadcast [object PyArrayMultiIterObject, check_size ignore]:
+ cdef int numiter
+ cdef npy_intp size, index
+ cdef int nd
+ cdef npy_intp *dimensions
+ cdef void **iters
+
+ ctypedef struct PyArrayObject:
+ # For use in situations where ndarray can't replace PyArrayObject*,
+ # like PyArrayObject**.
+ pass
+
+ ctypedef class numpy.ndarray [object PyArrayObject, check_size ignore]:
+ cdef __cythonbufferdefaults__ = {"mode": "strided"}
+
+ cdef:
+ # Only taking a few of the most commonly used and stable fields.
+ # One should use PyArray_* macros instead to access the C fields.
+ char *data
+ int ndim "nd"
+ npy_intp *shape "dimensions"
+ npy_intp *strides
+ dtype descr # deprecated since NumPy 1.7 !
+ PyObject* base
+
+ # Note: This syntax (function definition in pxd files) is an
+ # experimental exception made for __getbuffer__ and __releasebuffer__
+ # -- the details of this may change.
+ def __getbuffer__(ndarray self, Py_buffer* info, int flags):
+ PyObject_GetBuffer(<object>self, info, flags);
+
+ def __releasebuffer__(ndarray self, Py_buffer* info):
+ # We should call a possible tp_bufferrelease(self, info) but no
+ # interface to that is exposed by cython or python. And currently
+ # the function is NULL in numpy, we rely on refcounting to release
+ # info when self is collected
+ pass
+
+
+ ctypedef unsigned char npy_bool
+
+ ctypedef signed char npy_byte
+ ctypedef signed short npy_short
+ ctypedef signed int npy_int
+ ctypedef signed long npy_long
+ ctypedef signed long long npy_longlong
+
+ ctypedef unsigned char npy_ubyte
+ ctypedef unsigned short npy_ushort
+ ctypedef unsigned int npy_uint
+ ctypedef unsigned long npy_ulong
+ ctypedef unsigned long long npy_ulonglong
+
+ ctypedef float npy_float
+ ctypedef double npy_double
+ ctypedef long double npy_longdouble
+
+ ctypedef signed char npy_int8
+ ctypedef signed short npy_int16
+ ctypedef signed int npy_int32
+ ctypedef signed long long npy_int64
+ ctypedef signed long long npy_int96
+ ctypedef signed long long npy_int128
+
+ ctypedef unsigned char npy_uint8
+ ctypedef unsigned short npy_uint16
+ ctypedef unsigned int npy_uint32
+ ctypedef unsigned long long npy_uint64
+ ctypedef unsigned long long npy_uint96
+ ctypedef unsigned long long npy_uint128
+
+ ctypedef float npy_float32
+ ctypedef double npy_float64
+ ctypedef long double npy_float80
+ ctypedef long double npy_float96
+ ctypedef long double npy_float128
+
+ ctypedef struct npy_cfloat:
+ double real
+ double imag
+
+ ctypedef struct npy_cdouble:
+ double real
+ double imag
+
+ ctypedef struct npy_clongdouble:
+ long double real
+ long double imag
+
+ ctypedef struct npy_complex64:
+ float real
+ float imag
+
+ ctypedef struct npy_complex128:
+ double real
+ double imag
+
+ ctypedef struct npy_complex160:
+ long double real
+ long double imag
+
+ ctypedef struct npy_complex192:
+ long double real
+ long double imag
+
+ ctypedef struct npy_complex256:
+ long double real
+ long double imag
+
+ ctypedef struct PyArray_Dims:
+ npy_intp *ptr
+ int len
+
+ int _import_array() except -1
+
+ #
+ # Macros from ndarrayobject.h
+ #
+ bint PyArray_CHKFLAGS(ndarray m, int flags)
+ bint PyArray_IS_C_CONTIGUOUS(ndarray arr)
+ bint PyArray_IS_F_CONTIGUOUS(ndarray arr)
+ bint PyArray_ISCONTIGUOUS(ndarray m)
+ bint PyArray_ISWRITEABLE(ndarray m)
+ bint PyArray_ISALIGNED(ndarray m)
+
+ int PyArray_NDIM(ndarray)
+ bint PyArray_ISONESEGMENT(ndarray)
+ bint PyArray_ISFORTRAN(ndarray)
+ int PyArray_FORTRANIF(ndarray)
+
+ void* PyArray_DATA(ndarray)
+ char* PyArray_BYTES(ndarray)
+ npy_intp* PyArray_DIMS(ndarray)
+ npy_intp* PyArray_STRIDES(ndarray)
+ npy_intp PyArray_DIM(ndarray, size_t)
+ npy_intp PyArray_STRIDE(ndarray, size_t)
+
+ PyObject *PyArray_BASE(ndarray) # returns borrowed reference!
+ PyArray_Descr *PyArray_DESCR(ndarray) # returns borrowed reference to dtype!
+ int PyArray_FLAGS(ndarray)
+ npy_intp PyArray_ITEMSIZE(ndarray)
+ int PyArray_TYPE(ndarray arr)
+
+ object PyArray_GETITEM(ndarray arr, void *itemptr)
+ int PyArray_SETITEM(ndarray arr, void *itemptr, object obj)
+
+ bint PyTypeNum_ISBOOL(int)
+ bint PyTypeNum_ISUNSIGNED(int)
+ bint PyTypeNum_ISSIGNED(int)
+ bint PyTypeNum_ISINTEGER(int)
+ bint PyTypeNum_ISFLOAT(int)
+ bint PyTypeNum_ISNUMBER(int)
+ bint PyTypeNum_ISSTRING(int)
+ bint PyTypeNum_ISCOMPLEX(int)
+ bint PyTypeNum_ISPYTHON(int)
+ bint PyTypeNum_ISFLEXIBLE(int)
+ bint PyTypeNum_ISUSERDEF(int)
+ bint PyTypeNum_ISEXTENDED(int)
+ bint PyTypeNum_ISOBJECT(int)
+
+ bint PyDataType_ISBOOL(dtype)
+ bint PyDataType_ISUNSIGNED(dtype)
+ bint PyDataType_ISSIGNED(dtype)
+ bint PyDataType_ISINTEGER(dtype)
+ bint PyDataType_ISFLOAT(dtype)
+ bint PyDataType_ISNUMBER(dtype)
+ bint PyDataType_ISSTRING(dtype)
+ bint PyDataType_ISCOMPLEX(dtype)
+ bint PyDataType_ISPYTHON(dtype)
+ bint PyDataType_ISFLEXIBLE(dtype)
+ bint PyDataType_ISUSERDEF(dtype)
+ bint PyDataType_ISEXTENDED(dtype)
+ bint PyDataType_ISOBJECT(dtype)
+ bint PyDataType_HASFIELDS(dtype)
+ bint PyDataType_HASSUBARRAY(dtype)
+
+ bint PyArray_ISBOOL(ndarray)
+ bint PyArray_ISUNSIGNED(ndarray)
+ bint PyArray_ISSIGNED(ndarray)
+ bint PyArray_ISINTEGER(ndarray)
+ bint PyArray_ISFLOAT(ndarray)
+ bint PyArray_ISNUMBER(ndarray)
+ bint PyArray_ISSTRING(ndarray)
+ bint PyArray_ISCOMPLEX(ndarray)
+ bint PyArray_ISPYTHON(ndarray)
+ bint PyArray_ISFLEXIBLE(ndarray)
+ bint PyArray_ISUSERDEF(ndarray)
+ bint PyArray_ISEXTENDED(ndarray)
+ bint PyArray_ISOBJECT(ndarray)
+ bint PyArray_HASFIELDS(ndarray)
+
+ bint PyArray_ISVARIABLE(ndarray)
+
+ bint PyArray_SAFEALIGNEDCOPY(ndarray)
+ bint PyArray_ISNBO(char) # works on ndarray.byteorder
+ bint PyArray_IsNativeByteOrder(char) # works on ndarray.byteorder
+ bint PyArray_ISNOTSWAPPED(ndarray)
+ bint PyArray_ISBYTESWAPPED(ndarray)
+
+ bint PyArray_FLAGSWAP(ndarray, int)
+
+ bint PyArray_ISCARRAY(ndarray)
+ bint PyArray_ISCARRAY_RO(ndarray)
+ bint PyArray_ISFARRAY(ndarray)
+ bint PyArray_ISFARRAY_RO(ndarray)
+ bint PyArray_ISBEHAVED(ndarray)
+ bint PyArray_ISBEHAVED_RO(ndarray)
+
+
+ bint PyDataType_ISNOTSWAPPED(dtype)
+ bint PyDataType_ISBYTESWAPPED(dtype)
+
+ bint PyArray_DescrCheck(object)
+
+ bint PyArray_Check(object)
+ bint PyArray_CheckExact(object)
+
+ # Cannot be supported due to out arg:
+ # bint PyArray_HasArrayInterfaceType(object, dtype, object, object&)
+ # bint PyArray_HasArrayInterface(op, out)
+
+
+ bint PyArray_IsZeroDim(object)
+ # Cannot be supported due to ## ## in macro:
+ # bint PyArray_IsScalar(object, verbatim work)
+ bint PyArray_CheckScalar(object)
+ bint PyArray_IsPythonNumber(object)
+ bint PyArray_IsPythonScalar(object)
+ bint PyArray_IsAnyScalar(object)
+ bint PyArray_CheckAnyScalar(object)
+ ndarray PyArray_GETCONTIGUOUS(ndarray)
+ bint PyArray_SAMESHAPE(ndarray, ndarray)
+ npy_intp PyArray_SIZE(ndarray)
+ npy_intp PyArray_NBYTES(ndarray)
+
+ object PyArray_FROM_O(object)
+ object PyArray_FROM_OF(object m, int flags)
+ object PyArray_FROM_OT(object m, int type)
+ object PyArray_FROM_OTF(object m, int type, int flags)
+ object PyArray_FROMANY(object m, int type, int min, int max, int flags)
+ object PyArray_ZEROS(int nd, npy_intp* dims, int type, int fortran)
+ object PyArray_EMPTY(int nd, npy_intp* dims, int type, int fortran)
+ void PyArray_FILLWBYTE(object, int val)
+ npy_intp PyArray_REFCOUNT(object)
+ object PyArray_ContiguousFromAny(op, int, int min_depth, int max_depth)
+ unsigned char PyArray_EquivArrTypes(ndarray a1, ndarray a2)
+ bint PyArray_EquivByteorders(int b1, int b2)
+ object PyArray_SimpleNew(int nd, npy_intp* dims, int typenum)
+ object PyArray_SimpleNewFromData(int nd, npy_intp* dims, int typenum, void* data)
+ #object PyArray_SimpleNewFromDescr(int nd, npy_intp* dims, dtype descr)
+ object PyArray_ToScalar(void* data, ndarray arr)
+
+ void* PyArray_GETPTR1(ndarray m, npy_intp i)
+ void* PyArray_GETPTR2(ndarray m, npy_intp i, npy_intp j)
+ void* PyArray_GETPTR3(ndarray m, npy_intp i, npy_intp j, npy_intp k)
+ void* PyArray_GETPTR4(ndarray m, npy_intp i, npy_intp j, npy_intp k, npy_intp l)
+
+ void PyArray_XDECREF_ERR(ndarray)
+ # Cannot be supported due to out arg
+ # void PyArray_DESCR_REPLACE(descr)
+
+
+ object PyArray_Copy(ndarray)
+ object PyArray_FromObject(object op, int type, int min_depth, int max_depth)
+ object PyArray_ContiguousFromObject(object op, int type, int min_depth, int max_depth)
+ object PyArray_CopyFromObject(object op, int type, int min_depth, int max_depth)
+
+ object PyArray_Cast(ndarray mp, int type_num)
+ object PyArray_Take(ndarray ap, object items, int axis)
+ object PyArray_Put(ndarray ap, object items, object values)
+
+ void PyArray_ITER_RESET(flatiter it) nogil
+ void PyArray_ITER_NEXT(flatiter it) nogil
+ void PyArray_ITER_GOTO(flatiter it, npy_intp* destination) nogil
+ void PyArray_ITER_GOTO1D(flatiter it, npy_intp ind) nogil
+ void* PyArray_ITER_DATA(flatiter it) nogil
+ bint PyArray_ITER_NOTDONE(flatiter it) nogil
+
+ void PyArray_MultiIter_RESET(broadcast multi) nogil
+ void PyArray_MultiIter_NEXT(broadcast multi) nogil
+ void PyArray_MultiIter_GOTO(broadcast multi, npy_intp dest) nogil
+ void PyArray_MultiIter_GOTO1D(broadcast multi, npy_intp ind) nogil
+ void* PyArray_MultiIter_DATA(broadcast multi, npy_intp i) nogil
+ void PyArray_MultiIter_NEXTi(broadcast multi, npy_intp i) nogil
+ bint PyArray_MultiIter_NOTDONE(broadcast multi) nogil
+
+ # Functions from __multiarray_api.h
+
+ # Functions taking dtype and returning object/ndarray are disabled
+ # for now as they steal dtype references. I'm conservative and disable
+ # more than is probably needed until it can be checked further.
+ int PyArray_SetNumericOps (object)
+ object PyArray_GetNumericOps ()
+ int PyArray_INCREF (ndarray)
+ int PyArray_XDECREF (ndarray)
+ void PyArray_SetStringFunction (object, int)
+ dtype PyArray_DescrFromType (int)
+ object PyArray_TypeObjectFromType (int)
+ char * PyArray_Zero (ndarray)
+ char * PyArray_One (ndarray)
+ #object PyArray_CastToType (ndarray, dtype, int)
+ int PyArray_CastTo (ndarray, ndarray)
+ int PyArray_CastAnyTo (ndarray, ndarray)
+ int PyArray_CanCastSafely (int, int)
+ npy_bool PyArray_CanCastTo (dtype, dtype)
+ int PyArray_ObjectType (object, int)
+ dtype PyArray_DescrFromObject (object, dtype)
+ #ndarray* PyArray_ConvertToCommonType (object, int *)
+ dtype PyArray_DescrFromScalar (object)
+ dtype PyArray_DescrFromTypeObject (object)
+ npy_intp PyArray_Size (object)
+ #object PyArray_Scalar (void *, dtype, object)
+ #object PyArray_FromScalar (object, dtype)
+ void PyArray_ScalarAsCtype (object, void *)
+ #int PyArray_CastScalarToCtype (object, void *, dtype)
+ #int PyArray_CastScalarDirect (object, dtype, void *, int)
+ object PyArray_ScalarFromObject (object)
+ #PyArray_VectorUnaryFunc * PyArray_GetCastFunc (dtype, int)
+ object PyArray_FromDims (int, int *, int)
+ #object PyArray_FromDimsAndDataAndDescr (int, int *, dtype, char *)
+ #object PyArray_FromAny (object, dtype, int, int, int, object)
+ object PyArray_EnsureArray (object)
+ object PyArray_EnsureAnyArray (object)
+ #object PyArray_FromFile (stdio.FILE *, dtype, npy_intp, char *)
+ #object PyArray_FromString (char *, npy_intp, dtype, npy_intp, char *)
+ #object PyArray_FromBuffer (object, dtype, npy_intp, npy_intp)
+ #object PyArray_FromIter (object, dtype, npy_intp)
+ object PyArray_Return (ndarray)
+ #object PyArray_GetField (ndarray, dtype, int)
+ #int PyArray_SetField (ndarray, dtype, int, object)
+ object PyArray_Byteswap (ndarray, npy_bool)
+ object PyArray_Resize (ndarray, PyArray_Dims *, int, NPY_ORDER)
+ int PyArray_MoveInto (ndarray, ndarray)
+ int PyArray_CopyInto (ndarray, ndarray)
+ int PyArray_CopyAnyInto (ndarray, ndarray)
+ int PyArray_CopyObject (ndarray, object)
+ object PyArray_NewCopy (ndarray, NPY_ORDER)
+ object PyArray_ToList (ndarray)
+ object PyArray_ToString (ndarray, NPY_ORDER)
+ int PyArray_ToFile (ndarray, stdio.FILE *, char *, char *)
+ int PyArray_Dump (object, object, int)
+ object PyArray_Dumps (object, int)
+ int PyArray_ValidType (int)
+ void PyArray_UpdateFlags (ndarray, int)
+ object PyArray_New (type, int, npy_intp *, int, npy_intp *, void *, int, int, object)
+ #object PyArray_NewFromDescr (type, dtype, int, npy_intp *, npy_intp *, void *, int, object)
+ #dtype PyArray_DescrNew (dtype)
+ dtype PyArray_DescrNewFromType (int)
+ double PyArray_GetPriority (object, double)
+ object PyArray_IterNew (object)
+ object PyArray_MultiIterNew (int, ...)
+
+ int PyArray_PyIntAsInt (object)
+ npy_intp PyArray_PyIntAsIntp (object)
+ int PyArray_Broadcast (broadcast)
+ void PyArray_FillObjectArray (ndarray, object)
+ int PyArray_FillWithScalar (ndarray, object)
+ npy_bool PyArray_CheckStrides (int, int, npy_intp, npy_intp, npy_intp *, npy_intp *)
+ dtype PyArray_DescrNewByteorder (dtype, char)
+ object PyArray_IterAllButAxis (object, int *)
+ #object PyArray_CheckFromAny (object, dtype, int, int, int, object)
+ #object PyArray_FromArray (ndarray, dtype, int)
+ object PyArray_FromInterface (object)
+ object PyArray_FromStructInterface (object)
+ #object PyArray_FromArrayAttr (object, dtype, object)
+ #NPY_SCALARKIND PyArray_ScalarKind (int, ndarray*)
+ int PyArray_CanCoerceScalar (int, int, NPY_SCALARKIND)
+ object PyArray_NewFlagsObject (object)
+ npy_bool PyArray_CanCastScalar (type, type)
+ #int PyArray_CompareUCS4 (npy_ucs4 *, npy_ucs4 *, register size_t)
+ int PyArray_RemoveSmallest (broadcast)
+ int PyArray_ElementStrides (object)
+ void PyArray_Item_INCREF (char *, dtype)
+ void PyArray_Item_XDECREF (char *, dtype)
+ object PyArray_FieldNames (object)
+ object PyArray_Transpose (ndarray, PyArray_Dims *)
+ object PyArray_TakeFrom (ndarray, object, int, ndarray, NPY_CLIPMODE)
+ object PyArray_PutTo (ndarray, object, object, NPY_CLIPMODE)
+ object PyArray_PutMask (ndarray, object, object)
+ object PyArray_Repeat (ndarray, object, int)
+ object PyArray_Choose (ndarray, object, ndarray, NPY_CLIPMODE)
+ int PyArray_Sort (ndarray, int, NPY_SORTKIND)
+ object PyArray_ArgSort (ndarray, int, NPY_SORTKIND)
+ object PyArray_SearchSorted (ndarray, object, NPY_SEARCHSIDE)
+ object PyArray_ArgMax (ndarray, int, ndarray)
+ object PyArray_ArgMin (ndarray, int, ndarray)
+ object PyArray_Reshape (ndarray, object)
+ object PyArray_Newshape (ndarray, PyArray_Dims *, NPY_ORDER)
+ object PyArray_Squeeze (ndarray)
+ #object PyArray_View (ndarray, dtype, type)
+ object PyArray_SwapAxes (ndarray, int, int)
+ object PyArray_Max (ndarray, int, ndarray)
+ object PyArray_Min (ndarray, int, ndarray)
+ object PyArray_Ptp (ndarray, int, ndarray)
+ object PyArray_Mean (ndarray, int, int, ndarray)
+ object PyArray_Trace (ndarray, int, int, int, int, ndarray)
+ object PyArray_Diagonal (ndarray, int, int, int)
+ object PyArray_Clip (ndarray, object, object, ndarray)
+ object PyArray_Conjugate (ndarray, ndarray)
+ object PyArray_Nonzero (ndarray)
+ object PyArray_Std (ndarray, int, int, ndarray, int)
+ object PyArray_Sum (ndarray, int, int, ndarray)
+ object PyArray_CumSum (ndarray, int, int, ndarray)
+ object PyArray_Prod (ndarray, int, int, ndarray)
+ object PyArray_CumProd (ndarray, int, int, ndarray)
+ object PyArray_All (ndarray, int, ndarray)
+ object PyArray_Any (ndarray, int, ndarray)
+ object PyArray_Compress (ndarray, object, int, ndarray)
+ object PyArray_Flatten (ndarray, NPY_ORDER)
+ object PyArray_Ravel (ndarray, NPY_ORDER)
+ npy_intp PyArray_MultiplyList (npy_intp *, int)
+ int PyArray_MultiplyIntList (int *, int)
+ void * PyArray_GetPtr (ndarray, npy_intp*)
+ int PyArray_CompareLists (npy_intp *, npy_intp *, int)
+ #int PyArray_AsCArray (object*, void *, npy_intp *, int, dtype)
+ #int PyArray_As1D (object*, char **, int *, int)
+ #int PyArray_As2D (object*, char ***, int *, int *, int)
+ int PyArray_Free (object, void *)
+ #int PyArray_Converter (object, object*)
+ int PyArray_IntpFromSequence (object, npy_intp *, int)
+ object PyArray_Concatenate (object, int)
+ object PyArray_InnerProduct (object, object)
+ object PyArray_MatrixProduct (object, object)
+ object PyArray_CopyAndTranspose (object)
+ object PyArray_Correlate (object, object, int)
+ int PyArray_TypestrConvert (int, int)
+ #int PyArray_DescrConverter (object, dtype*)
+ #int PyArray_DescrConverter2 (object, dtype*)
+ int PyArray_IntpConverter (object, PyArray_Dims *)
+ #int PyArray_BufferConverter (object, chunk)
+ int PyArray_AxisConverter (object, int *)
+ int PyArray_BoolConverter (object, npy_bool *)
+ int PyArray_ByteorderConverter (object, char *)
+ int PyArray_OrderConverter (object, NPY_ORDER *)
+ unsigned char PyArray_EquivTypes (dtype, dtype)
+ #object PyArray_Zeros (int, npy_intp *, dtype, int)
+ #object PyArray_Empty (int, npy_intp *, dtype, int)
+ object PyArray_Where (object, object, object)
+ object PyArray_Arange (double, double, double, int)
+ #object PyArray_ArangeObj (object, object, object, dtype)
+ int PyArray_SortkindConverter (object, NPY_SORTKIND *)
+ object PyArray_LexSort (object, int)
+ object PyArray_Round (ndarray, int, ndarray)
+ unsigned char PyArray_EquivTypenums (int, int)
+ int PyArray_RegisterDataType (dtype)
+ int PyArray_RegisterCastFunc (dtype, int, PyArray_VectorUnaryFunc *)
+ int PyArray_RegisterCanCast (dtype, int, NPY_SCALARKIND)
+ #void PyArray_InitArrFuncs (PyArray_ArrFuncs *)
+ object PyArray_IntTupleFromIntp (int, npy_intp *)
+ int PyArray_TypeNumFromName (char *)
+ int PyArray_ClipmodeConverter (object, NPY_CLIPMODE *)
+ #int PyArray_OutputConverter (object, ndarray*)
+ object PyArray_BroadcastToShape (object, npy_intp *, int)
+ void _PyArray_SigintHandler (int)
+ void* _PyArray_GetSigintBuf ()
+ #int PyArray_DescrAlignConverter (object, dtype*)
+ #int PyArray_DescrAlignConverter2 (object, dtype*)
+ int PyArray_SearchsideConverter (object, void *)
+ object PyArray_CheckAxis (ndarray, int *, int)
+ npy_intp PyArray_OverflowMultiplyList (npy_intp *, int)
+ int PyArray_CompareString (char *, char *, size_t)
+ int PyArray_SetBaseObject(ndarray, base) # NOTE: steals a reference to base! Use "set_array_base()" instead.
+
+
+# Typedefs that match the runtime dtype objects in
+# the numpy module.
+
+# The ones that are commented out need an IFDEF function
+# in Cython to enable them only on the right systems.
+
+ctypedef npy_int8 int8_t
+ctypedef npy_int16 int16_t
+ctypedef npy_int32 int32_t
+ctypedef npy_int64 int64_t
+#ctypedef npy_int96 int96_t
+#ctypedef npy_int128 int128_t
+
+ctypedef npy_uint8 uint8_t
+ctypedef npy_uint16 uint16_t
+ctypedef npy_uint32 uint32_t
+ctypedef npy_uint64 uint64_t
+#ctypedef npy_uint96 uint96_t
+#ctypedef npy_uint128 uint128_t
+
+ctypedef npy_float32 float32_t
+ctypedef npy_float64 float64_t
+#ctypedef npy_float80 float80_t
+#ctypedef npy_float128 float128_t
+
+ctypedef float complex complex64_t
+ctypedef double complex complex128_t
+
+# The int types are mapped a bit surprisingly --
+# numpy.int corresponds to 'l' and numpy.long to 'q'
+ctypedef npy_long int_t
+ctypedef npy_longlong long_t
+ctypedef npy_longlong longlong_t
+
+ctypedef npy_ulong uint_t
+ctypedef npy_ulonglong ulong_t
+ctypedef npy_ulonglong ulonglong_t
+
+ctypedef npy_intp intp_t
+ctypedef npy_uintp uintp_t
+
+ctypedef npy_double float_t
+ctypedef npy_double double_t
+ctypedef npy_longdouble longdouble_t
+
+ctypedef npy_cfloat cfloat_t
+ctypedef npy_cdouble cdouble_t
+ctypedef npy_clongdouble clongdouble_t
+
+ctypedef npy_cdouble complex_t
+
+cdef inline object PyArray_MultiIterNew1(a):
+ return PyArray_MultiIterNew(1, <void*>a)
+
+cdef inline object PyArray_MultiIterNew2(a, b):
+ return PyArray_MultiIterNew(2, <void*>a, <void*>b)
+
+cdef inline object PyArray_MultiIterNew3(a, b, c):
+ return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
+
+cdef inline object PyArray_MultiIterNew4(a, b, c, d):
+ return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
+
+cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
+ return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
+
+cdef inline tuple PyDataType_SHAPE(dtype d):
+ if PyDataType_HASSUBARRAY(d):
+ return <tuple>d.subarray.shape
+ else:
+ return ()
+
+cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL:
+ # Recursive utility function used in __getbuffer__ to get format
+ # string. The new location in the format string is returned.
+
+ cdef dtype child
+ cdef int endian_detector = 1
+ cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
+ cdef tuple fields
+
+ for childname in descr.names:
+ fields = descr.fields[childname]
+ child, new_offset = fields
+
+ if (end - f) - <int>(new_offset - offset[0]) < 15:
+ raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
+
+ if ((child.byteorder == c'>' and little_endian) or
+ (child.byteorder == c'<' and not little_endian)):
+ raise ValueError(u"Non-native byte order not supported")
+ # One could encode it in the format string and have Cython
+ # complain instead, BUT: < and > in format strings also imply
+ # standardized sizes for datatypes, and we rely on native in
+ # order to avoid reencoding data types based on their size.
+ #
+ # A proper PEP 3118 exporter for other clients than Cython
+ # must deal properly with this!
+
+ # Output padding bytes
+ while offset[0] < new_offset:
+ f[0] = 120 # "x"; pad byte
+ f += 1
+ offset[0] += 1
+
+ offset[0] += child.itemsize
+
+ if not PyDataType_HASFIELDS(child):
+ t = child.type_num
+ if end - f < 5:
+ raise RuntimeError(u"Format string allocated too short.")
+
+ # Until ticket #99 is fixed, use integers to avoid warnings
+ if t == NPY_BYTE: f[0] = 98 #"b"
+ elif t == NPY_UBYTE: f[0] = 66 #"B"
+ elif t == NPY_SHORT: f[0] = 104 #"h"
+ elif t == NPY_USHORT: f[0] = 72 #"H"
+ elif t == NPY_INT: f[0] = 105 #"i"
+ elif t == NPY_UINT: f[0] = 73 #"I"
+ elif t == NPY_LONG: f[0] = 108 #"l"
+ elif t == NPY_ULONG: f[0] = 76 #"L"
+ elif t == NPY_LONGLONG: f[0] = 113 #"q"
+ elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
+ elif t == NPY_FLOAT: f[0] = 102 #"f"
+ elif t == NPY_DOUBLE: f[0] = 100 #"d"
+ elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
+ elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
+ elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
+ elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
+ elif t == NPY_OBJECT: f[0] = 79 #"O"
+ else:
+ raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
+ f += 1
+ else:
+ # Cython ignores struct boundary information ("T{...}"),
+ # so don't output it
+ f = _util_dtypestring(child, f, end, offset)
+ return f
+
+
+#
+# ufunc API
+#
+
+cdef extern from "numpy/ufuncobject.h":
+
+ ctypedef void (*PyUFuncGenericFunction) (char **, npy_intp *, npy_intp *, void *)
+
+ ctypedef extern class numpy.ufunc [object PyUFuncObject, check_size ignore]:
+ cdef:
+ int nin, nout, nargs
+ int identity
+ PyUFuncGenericFunction *functions
+ void **data
+ int ntypes
+ int check_return
+ char *name
+ char *types
+ char *doc
+ void *ptr
+ PyObject *obj
+ PyObject *userloops
+
+ cdef enum:
+ PyUFunc_Zero
+ PyUFunc_One
+ PyUFunc_None
+ UFUNC_ERR_IGNORE
+ UFUNC_ERR_WARN
+ UFUNC_ERR_RAISE
+ UFUNC_ERR_CALL
+ UFUNC_ERR_PRINT
+ UFUNC_ERR_LOG
+ UFUNC_MASK_DIVIDEBYZERO
+ UFUNC_MASK_OVERFLOW
+ UFUNC_MASK_UNDERFLOW
+ UFUNC_MASK_INVALID
+ UFUNC_SHIFT_DIVIDEBYZERO
+ UFUNC_SHIFT_OVERFLOW
+ UFUNC_SHIFT_UNDERFLOW
+ UFUNC_SHIFT_INVALID
+ UFUNC_FPE_DIVIDEBYZERO
+ UFUNC_FPE_OVERFLOW
+ UFUNC_FPE_UNDERFLOW
+ UFUNC_FPE_INVALID
+ UFUNC_ERR_DEFAULT
+ UFUNC_ERR_DEFAULT2
+
+ object PyUFunc_FromFuncAndData(PyUFuncGenericFunction *,
+ void **, char *, int, int, int, int, char *, char *, int)
+ int PyUFunc_RegisterLoopForType(ufunc, int,
+ PyUFuncGenericFunction, int *, void *)
+ int PyUFunc_GenericFunction \
+ (ufunc, PyObject *, PyObject *, PyArrayObject **)
+ void PyUFunc_f_f_As_d_d \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_d_d \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_f_f \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_g_g \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_F_F_As_D_D \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_F_F \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_D_D \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_G_G \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_O_O \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_ff_f_As_dd_d \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_ff_f \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_dd_d \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_gg_g \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_FF_F_As_DD_D \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_DD_D \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_FF_F \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_GG_G \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_OO_O \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_O_O_method \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_OO_O_method \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_On_Om \
+ (char **, npy_intp *, npy_intp *, void *)
+ int PyUFunc_GetPyValues \
+ (char *, int *, int *, PyObject **)
+ int PyUFunc_checkfperr \
+ (int, PyObject *, int *)
+ void PyUFunc_clearfperr()
+ int PyUFunc_getfperr()
+ int PyUFunc_handlefperr \
+ (int, PyObject *, int, int *)
+ int PyUFunc_ReplaceLoopBySignature \
+ (ufunc, PyUFuncGenericFunction, int *, PyUFuncGenericFunction *)
+ object PyUFunc_FromFuncAndDataAndSignature \
+ (PyUFuncGenericFunction *, void **, char *, int, int, int,
+ int, char *, char *, int, char *)
+
+ int _import_umath() except -1
+
+cdef inline void set_array_base(ndarray arr, object base):
+ Py_INCREF(base) # important to do this before stealing the reference below!
+ PyArray_SetBaseObject(arr, base)
+
+cdef inline object get_array_base(ndarray arr):
+ base = PyArray_BASE(arr)
+ if base is NULL:
+ return None
+ return <object>base
+
+# Versions of the import_* functions which are more suitable for
+# Cython code.
+cdef inline int import_array() except -1:
+ try:
+ _import_array()
+ except Exception:
+ raise ImportError("numpy.core.multiarray failed to import")
+
+cdef inline int import_umath() except -1:
+ try:
+ _import_umath()
+ except Exception:
+ raise ImportError("numpy.core.umath failed to import")
+
+cdef inline int import_ufunc() except -1:
+ try:
+ _import_umath()
+ except Exception:
+ raise ImportError("numpy.core.umath failed to import")
diff --git a/numpy/__init__.py b/numpy/__init__.py
index ba88c733f..fef8245de 100644
--- a/numpy/__init__.py
+++ b/numpy/__init__.py
@@ -143,7 +143,9 @@ else:
from .core import *
from . import compat
from . import lib
+ # FIXME: why have numpy.lib if everything is imported here??
from .lib import *
+
from . import linalg
from . import fft
from . import polynomial
@@ -166,12 +168,22 @@ else:
# now that numpy modules are imported, can initialize limits
core.getlimits._register_known_types()
+ __all__.extend(['bool', 'int', 'float', 'complex', 'object', 'unicode',
+ 'str'])
__all__.extend(['__version__', 'show_config'])
__all__.extend(core.__all__)
__all__.extend(_mat.__all__)
__all__.extend(lib.__all__)
__all__.extend(['linalg', 'fft', 'random', 'ctypeslib', 'ma'])
+ # Remove things that are in the numpy.lib but not in the numpy namespace
+ # Note that there is a test (numpy/tests/test_public_api.py:test_numpy_namespace)
+ # that prevents adding more things to the main namespace by accident.
+ # The list below will grow until the `from .lib import *` fixme above is
+ # taken care of
+ __all__.remove('Arrayterator')
+ del Arrayterator
+
# Filter out Cython harmless warnings
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
@@ -182,9 +194,34 @@ else:
oldnumeric = 'removed'
numarray = 'removed'
- # We don't actually use this ourselves anymore, but I'm not 100% sure that
- # no-one else in the world is using it (though I hope not)
- from .testing import Tester
+ if sys.version_info[:2] >= (3, 7):
+ # Importing Tester requires importing all of UnitTest which is not a
+ # cheap import. Since it is mainly used in test suites, we lazily import it
+ # here to save on the order of 10 ms of import time for most users
+ #
+ # The previous way Tester was imported also had a side effect of adding
+ # the full `numpy.testing` namespace
+ #
+ # module level getattr is only supported in 3.7 onwards
+ # https://www.python.org/dev/peps/pep-0562/
+ def __getattr__(attr):
+ if attr == 'testing':
+ import numpy.testing as testing
+ return testing
+ elif attr == 'Tester':
+ from .testing import Tester
+ return Tester
+ else:
+ raise AttributeError("module {!r} has no attribute "
+ "{!r}".format(__name__, attr))
+
+ def __dir__():
+ return __all__ + ['Tester', 'testing']
+
+ else:
+ # We don't actually use this ourselves anymore, but I'm not 100% sure that
+ # no-one else in the world is using it (though I hope not)
+ from .testing import Tester
# Pytest testing
from numpy._pytesttester import PytestTester
diff --git a/numpy/_pytesttester.py b/numpy/_pytesttester.py
index 8d1a3811c..b25224c20 100644
--- a/numpy/_pytesttester.py
+++ b/numpy/_pytesttester.py
@@ -48,10 +48,9 @@ class PytestTester(object):
"""
Pytest test runner.
- This class is made available in ``numpy.testing``, and a test function
- is typically added to a package's __init__.py like so::
+ A test function is typically added to a package's __init__.py like so::
- from numpy.testing import PytestTester
+ from numpy._pytesttester import PytestTester
test = PytestTester(__name__).test
del PytestTester
@@ -68,6 +67,12 @@ class PytestTester(object):
module_name : module name
The name of the module to test.
+ Notes
+ -----
+ Unlike the previous ``nose``-based implementation, this class is not
+ publicly exposed as it performs some ``numpy``-specific warning
+ suppression.
+
"""
def __init__(self, module_name):
self.module_name = module_name
diff --git a/numpy/core/__init__.py b/numpy/core/__init__.py
index ce443bb22..c3b3f0392 100644
--- a/numpy/core/__init__.py
+++ b/numpy/core/__init__.py
@@ -1,6 +1,13 @@
+"""
+Contains the core of NumPy: ndarray, ufuncs, dtypes, etc.
+
+Please note that this module is private. All functions and objects
+are available in the main ``numpy`` namespace - use that instead.
+
+"""
+
from __future__ import division, absolute_import, print_function
-from .info import __doc__
from numpy.version import version as __version__
import os
diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py
index 033e3809c..30379dfbe 100644
--- a/numpy/core/_add_newdocs.py
+++ b/numpy/core/_add_newdocs.py
@@ -386,12 +386,12 @@ add_newdoc('numpy.core', 'nditer',
>>> luf(lambda i,j:i*i + j/2, a, b)
array([ 0.5, 1.5, 4.5, 9.5, 16.5])
- If operand flags `"writeonly"` or `"readwrite"` are used the operands may
- be views into the original data with the `WRITEBACKIFCOPY` flag. In this case
- nditer must be used as a context manager or the nditer.close
- method must be called before using the result. The temporary
- data will be written back to the original data when the `__exit__`
- function is called but not before:
+ If operand flags `"writeonly"` or `"readwrite"` are used the
+ operands may be views into the original data with the
+ `WRITEBACKIFCOPY` flag. In this case `nditer` must be used as a
+ context manager or the `nditer.close` method must be called before
+ using the result. The temporary data will be written back to the
+ original data when the `__exit__` function is called but not before:
>>> a = np.arange(6, dtype='i4')[::-2]
>>> with np.nditer(a, [],
@@ -413,6 +413,8 @@ add_newdoc('numpy.core', 'nditer',
`x.data` will still point at some part of `a.data`, and writing to
one will affect the other.
+ Context management and the `close` method appeared in version 1.15.0.
+
""")
# nditer methods
@@ -568,6 +570,8 @@ add_newdoc('numpy.core', 'nditer', ('close',
Resolve all writeback semantics in writeable operands.
+ .. versionadded:: 1.15.0
+
See Also
--------
@@ -1347,7 +1351,7 @@ add_newdoc('numpy.core.multiarray', 'arange',
add_newdoc('numpy.core.multiarray', '_get_ndarray_c_version',
"""_get_ndarray_c_version()
- Return the compile time NDARRAY_VERSION number.
+ Return the compile time NPY_VERSION (formerly called NDARRAY_VERSION) number.
""")
diff --git a/numpy/core/_dtype.py b/numpy/core/_dtype.py
index 3a12c8fad..df1ff180e 100644
--- a/numpy/core/_dtype.py
+++ b/numpy/core/_dtype.py
@@ -252,7 +252,7 @@ def _is_packed(dtype):
from a list of the field names and dtypes with no additional
dtype parameters.
- Duplicates the C `is_dtype_struct_simple_unaligned_layout` functio.
+ Duplicates the C `is_dtype_struct_simple_unaligned_layout` function.
"""
total_offset = 0
for name in dtype.names:
@@ -316,26 +316,39 @@ def _subarray_str(dtype):
)
+def _name_includes_bit_suffix(dtype):
+ if dtype.type == np.object_:
+ # pointer size varies by system, best to omit it
+ return False
+ elif dtype.type == np.bool_:
+ # implied
+ return False
+ elif np.issubdtype(dtype, np.flexible) and _isunsized(dtype):
+ # unspecified
+ return False
+ else:
+ return True
+
+
def _name_get(dtype):
- # provides dtype.name.__get__
+ # provides dtype.name.__get__, documented as returning a "bit name"
if dtype.isbuiltin == 2:
# user dtypes don't promise to do anything special
return dtype.type.__name__
- # Builtin classes are documented as returning a "bit name"
- name = dtype.type.__name__
-
- # handle bool_, str_, etc
- if name[-1] == '_':
- name = name[:-1]
+ if issubclass(dtype.type, np.void):
+ # historically, void subclasses preserve their name, eg `record64`
+ name = dtype.type.__name__
+ else:
+ name = _kind_name(dtype)
- # append bit counts to str, unicode, and void
- if np.issubdtype(dtype, np.flexible) and not _isunsized(dtype):
+ # append bit counts
+ if _name_includes_bit_suffix(dtype):
name += "{}".format(dtype.itemsize * 8)
# append metadata to datetimes
- elif dtype.type in (np.datetime64, np.timedelta64):
+ if dtype.type in (np.datetime64, np.timedelta64):
name += _datetime_metadata_str(dtype)
return name
diff --git a/numpy/core/_exceptions.py b/numpy/core/_exceptions.py
index a1af7a78d..88a45561f 100644
--- a/numpy/core/_exceptions.py
+++ b/numpy/core/_exceptions.py
@@ -27,6 +27,7 @@ def _display_as_base(cls):
assert issubclass(cls, Exception)
cls.__name__ = cls.__base__.__name__
cls.__qualname__ = cls.__base__.__qualname__
+ set_module(cls.__base__.__module__)(cls)
return cls
@@ -146,6 +147,54 @@ class _ArrayMemoryError(MemoryError):
self.shape = shape
self.dtype = dtype
- def __str__(self):
- return "Unable to allocate array with shape {} and data type {}".format(self.shape, self.dtype)
+ @property
+ def _total_size(self):
+ num_bytes = self.dtype.itemsize
+ for dim in self.shape:
+ num_bytes *= dim
+ return num_bytes
+
+ @staticmethod
+ def _size_to_string(num_bytes):
+ """ Convert a number of bytes into a binary size string """
+ import math
+
+ # https://en.wikipedia.org/wiki/Binary_prefix
+ LOG2_STEP = 10
+ STEP = 1024
+ units = ['bytes', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB']
+
+ unit_i = max(num_bytes.bit_length() - 1, 1) // LOG2_STEP
+ unit_val = 1 << (unit_i * LOG2_STEP)
+ n_units = num_bytes / unit_val
+ del unit_val
+
+ # ensure we pick a unit that is correct after rounding
+ if round(n_units) == STEP:
+ unit_i += 1
+ n_units /= STEP
+
+ # deal with sizes so large that we don't have units for them
+ if unit_i >= len(units):
+ new_unit_i = len(units) - 1
+ n_units *= 1 << ((unit_i - new_unit_i) * LOG2_STEP)
+ unit_i = new_unit_i
+
+ unit_name = units[unit_i]
+ # format with a sensible number of digits
+ if unit_i == 0:
+ # no decimal point on bytes
+ return '{:.0f} {}'.format(n_units, unit_name)
+ elif round(n_units) < 1000:
+ # 3 significant figures, if none are dropped to the left of the .
+ return '{:#.3g} {}'.format(n_units, unit_name)
+ else:
+ # just give all the digits otherwise
+ return '{:#.0f} {}'.format(n_units, unit_name)
+ def __str__(self):
+ size_str = self._size_to_string(self._total_size)
+ return (
+ "Unable to allocate {} for an array with shape {} and data type {}"
+ .format(size_str, self.shape, self.dtype)
+ )
diff --git a/numpy/core/_internal.py b/numpy/core/_internal.py
index c70718cb6..b0ea603e1 100644
--- a/numpy/core/_internal.py
+++ b/numpy/core/_internal.py
@@ -459,7 +459,7 @@ def _getfield_is_safe(oldtype, newtype, offset):
if newtype.hasobject or oldtype.hasobject:
if offset == 0 and newtype == oldtype:
return
- if oldtype.names:
+ if oldtype.names is not None:
for name in oldtype.names:
if (oldtype.fields[name][1] == offset and
oldtype.fields[name][0] == newtype):
diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py
index 108364824..b1310a737 100644
--- a/numpy/core/arrayprint.py
+++ b/numpy/core/arrayprint.py
@@ -89,8 +89,10 @@ def _make_options_dict(precision=None, threshold=None, edgeitems=None,
"`False`", stacklevel=3)
if threshold is not None:
# forbid the bad threshold arg suggested by stack overflow, gh-12351
- if not isinstance(threshold, numbers.Number) or np.isnan(threshold):
- raise ValueError("threshold must be numeric and non-NAN, try "
+ if not isinstance(threshold, numbers.Number):
+ raise TypeError("threshold must be numeric")
+ if np.isnan(threshold):
+ raise ValueError("threshold must be non-NAN, try "
"sys.maxsize for untruncated representation")
return options
@@ -192,7 +194,7 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None,
See Also
--------
- get_printoptions, set_string_function, array2string
+ get_printoptions, printoptions, set_string_function, array2string
Notes
-----
@@ -283,7 +285,7 @@ def get_printoptions():
See Also
--------
- set_printoptions, set_string_function
+ set_printoptions, printoptions, set_string_function
"""
return _format_options.copy()
@@ -683,7 +685,7 @@ def array2string(a, max_line_width=None, precision=None,
if style is np._NoValue:
style = repr
- if a.shape == () and not a.dtype.names:
+ if a.shape == () and a.dtype.names is None:
return style(a.item())
elif style is not np._NoValue:
# Deprecation 11-9-2017 v1.14
@@ -982,20 +984,6 @@ class FloatingFormat(object):
pad_left=self.pad_left,
pad_right=self.pad_right)
-# for back-compatibility, we keep the classes for each float type too
-class FloatFormat(FloatingFormat):
- def __init__(self, *args, **kwargs):
- warnings.warn("FloatFormat has been replaced by FloatingFormat",
- DeprecationWarning, stacklevel=2)
- super(FloatFormat, self).__init__(*args, **kwargs)
-
-
-class LongFloatFormat(FloatingFormat):
- def __init__(self, *args, **kwargs):
- warnings.warn("LongFloatFormat has been replaced by FloatingFormat",
- DeprecationWarning, stacklevel=2)
- super(LongFloatFormat, self).__init__(*args, **kwargs)
-
@set_module('numpy')
def format_float_scientific(x, precision=None, unique=True, trim='k',
@@ -1194,21 +1182,6 @@ class ComplexFloatingFormat(object):
return r + i
-# for back-compatibility, we keep the classes for each complex type too
-class ComplexFormat(ComplexFloatingFormat):
- def __init__(self, *args, **kwargs):
- warnings.warn(
- "ComplexFormat has been replaced by ComplexFloatingFormat",
- DeprecationWarning, stacklevel=2)
- super(ComplexFormat, self).__init__(*args, **kwargs)
-
-class LongComplexFormat(ComplexFloatingFormat):
- def __init__(self, *args, **kwargs):
- warnings.warn(
- "LongComplexFormat has been replaced by ComplexFloatingFormat",
- DeprecationWarning, stacklevel=2)
- super(LongComplexFormat, self).__init__(*args, **kwargs)
-
class _TimelikeFormat(object):
def __init__(self, data):
@@ -1319,16 +1292,6 @@ class StructuredVoidFormat(object):
return "({})".format(", ".join(str_fields))
-# for backwards compatibility
-class StructureFormat(StructuredVoidFormat):
- def __init__(self, *args, **kwargs):
- # NumPy 1.14, 2018-02-14
- warnings.warn(
- "StructureFormat has been replaced by StructuredVoidFormat",
- DeprecationWarning, stacklevel=2)
- super(StructureFormat, self).__init__(*args, **kwargs)
-
-
def _void_scalar_repr(x):
"""
Implements the repr for structured-void scalars. It is called from the
diff --git a/numpy/core/code_generators/genapi.py b/numpy/core/code_generators/genapi.py
index 923c34425..7336e5e13 100644
--- a/numpy/core/code_generators/genapi.py
+++ b/numpy/core/code_generators/genapi.py
@@ -259,7 +259,8 @@ def find_functions(filename, tag='API'):
elif state == STATE_ARGS:
if line.startswith('{'):
# finished
- fargs_str = ' '.join(function_args).rstrip(' )')
+ # remove any white space and the closing bracket:
+ fargs_str = ' '.join(function_args).rstrip()[:-1].rstrip()
fargs = split_arguments(fargs_str)
f = Function(function_name, return_type, fargs,
'\n'.join(doclist))
diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py
index bf1747272..6729fe197 100644
--- a/numpy/core/code_generators/generate_umath.py
+++ b/numpy/core/code_generators/generate_umath.py
@@ -662,14 +662,18 @@ defdict = {
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.cos'),
None,
- TD(inexact, f='cos', astype={'e':'f'}),
+ TD('e', f='cos', astype={'e':'f'}),
+ TD('f', simd=[('fma', 'f'), ('avx512f', 'f')]),
+ TD('fdg' + cmplx, f='cos'),
TD(P, f='cos'),
),
'sin':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.sin'),
None,
- TD(inexact, f='sin', astype={'e':'f'}),
+ TD('e', f='sin', astype={'e':'f'}),
+ TD('f', simd=[('fma', 'f'), ('avx512f', 'f')]),
+ TD('fdg' + cmplx, f='sin'),
TD(P, f='sin'),
),
'tan':
@@ -705,8 +709,8 @@ defdict = {
docstrings.get('numpy.core.umath.exp'),
None,
TD('e', f='exp', astype={'e':'f'}),
- TD('f', simd=[('avx2', 'f'), ('avx512f', 'f')]),
- TD(inexact, f='exp', astype={'e':'f'}),
+ TD('f', simd=[('fma', 'f'), ('avx512f', 'f')]),
+ TD('fdg' + cmplx, f='exp'),
TD(P, f='exp'),
),
'exp2':
@@ -728,8 +732,8 @@ defdict = {
docstrings.get('numpy.core.umath.log'),
None,
TD('e', f='log', astype={'e':'f'}),
- TD('f', simd=[('avx2', 'f'), ('avx512f', 'f')]),
- TD(inexact, f='log', astype={'e':'f'}),
+ TD('f', simd=[('fma', 'f'), ('avx512f', 'f')]),
+ TD('fdg' + cmplx, f='log'),
TD(P, f='log'),
),
'log2':
@@ -759,7 +763,7 @@ defdict = {
None,
TD('e', f='sqrt', astype={'e':'f'}),
TD(inexactvec),
- TD(inexact, f='sqrt', astype={'e':'f'}),
+ TD('fdg' + cmplx, f='sqrt'),
TD(P, f='sqrt'),
),
'cbrt':
diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py
index 3389e7d66..140056432 100644
--- a/numpy/core/fromnumeric.py
+++ b/numpy/core/fromnumeric.py
@@ -25,7 +25,7 @@ __all__ = [
'argmin', 'argpartition', 'argsort', 'around', 'choose', 'clip',
'compress', 'cumprod', 'cumproduct', 'cumsum', 'diagonal', 'mean',
'ndim', 'nonzero', 'partition', 'prod', 'product', 'ptp', 'put',
- 'rank', 'ravel', 'repeat', 'reshape', 'resize', 'round_',
+ 'ravel', 'repeat', 'reshape', 'resize', 'round_',
'searchsorted', 'shape', 'size', 'sometrue', 'sort', 'squeeze',
'std', 'sum', 'swapaxes', 'take', 'trace', 'transpose', 'var',
]
@@ -380,6 +380,7 @@ def choose(a, choices, out=None, mode='raise'):
See Also
--------
ndarray.choose : equivalent method
+ numpy.take_along_axis : Preferable if `choices` is an array
Notes
-----
@@ -908,17 +909,17 @@ def sort(a, axis=-1, kind=None, order=None):
.. versionadded:: 1.12.0
- quicksort has been changed to `introsort <https://en.wikipedia.org/wiki/Introsort>`_.
+ quicksort has been changed to `introsort <https://en.wikipedia.org/wiki/Introsort>`_.
When sorting does not make enough progress it switches to
- `heapsort <https://en.wikipedia.org/wiki/Heapsort>`_.
+ `heapsort <https://en.wikipedia.org/wiki/Heapsort>`_.
This implementation makes quicksort O(n*log(n)) in the worst case.
'stable' automatically chooses the best stable sorting algorithm
- for the data type being sorted.
- It, along with 'mergesort' is currently mapped to
- `timsort <https://en.wikipedia.org/wiki/Timsort>`_
- or `radix sort <https://en.wikipedia.org/wiki/Radix_sort>`_
- depending on the data type.
+ for the data type being sorted.
+ It, along with 'mergesort' is currently mapped to
+ `timsort <https://en.wikipedia.org/wiki/Timsort>`_
+ or `radix sort <https://en.wikipedia.org/wiki/Radix_sort>`_
+ depending on the data type.
API forward compatibility currently limits the
ability to select the implementation and it is hardwired for the different
data types.
@@ -2782,6 +2783,10 @@ def alen(a):
7
"""
+ # NumPy 1.18.0, 2019-08-02
+ warnings.warn(
+ "`np.alen` is deprecated, use `len` instead",
+ DeprecationWarning, stacklevel=2)
try:
return len(a)
except TypeError:
@@ -3120,10 +3125,37 @@ def around(a, decimals=0, out=None):
-----
For values exactly halfway between rounded decimal values, NumPy
rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0,
- -0.5 and 0.5 round to 0.0, etc. Results may also be surprising due
- to the inexact representation of decimal fractions in the IEEE
- floating point standard [1]_ and errors introduced when scaling
- by powers of ten.
+ -0.5 and 0.5 round to 0.0, etc.
+
+ ``np.around`` uses a fast but sometimes inexact algorithm to round
+ floating-point datatypes. For positive `decimals` it is equivalent to
+ ``np.true_divide(np.rint(a * 10**decimals), 10**decimals)``, which has
+ error due to the inexact representation of decimal fractions in the IEEE
+ floating point standard [1]_ and errors introduced when scaling by powers
+ of ten. For instance, note the extra "1" in the following:
+
+ >>> np.round(56294995342131.5, 3)
+ 56294995342131.51
+
+ If your goal is to print such values with a fixed number of decimals, it is
+ preferable to use numpy's float printing routines to limit the number of
+ printed decimals:
+
+ >>> np.format_float_positional(56294995342131.5, precision=3)
+ '56294995342131.5'
+
+ The float printing routines use an accurate but much more computationally
+ demanding algorithm to compute the number of digits after the decimal
+ point.
+
+ Alternatively, Python's builtin `round` function uses a more accurate
+ but slower algorithm for 64-bit floating point values:
+
+ >>> round(56294995342131.5, 3)
+ 56294995342131.5
+ >>> np.round(16.055, 2), round(16.055, 2) # equals 16.0549999999999997
+ (16.06, 16.05)
+
References
----------
@@ -3414,7 +3446,7 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
instead of a single axis or all the axes as before.
dtype : data-type, optional
Type to use in computing the variance. For arrays of integer type
- the default is `float32`; for arrays of float types it is the same as
+ the default is `float64`; for arrays of float types it is the same as
the array type.
out : ndarray, optional
Alternate output array in which to place the result. It must have
@@ -3573,30 +3605,3 @@ def alltrue(*args, **kwargs):
numpy.all : Equivalent function; see for details.
"""
return all(*args, **kwargs)
-
-
-@array_function_dispatch(_ndim_dispatcher)
-def rank(a):
- """
- Return the number of dimensions of an array.
-
- .. note::
- This function is deprecated in NumPy 1.9 to avoid confusion with
- `numpy.linalg.matrix_rank`. The ``ndim`` attribute or function
- should be used instead.
-
- See Also
- --------
- ndim : equivalent non-deprecated function
-
- Notes
- -----
- In the old Numeric package, `rank` was the term used for the number of
- dimensions, but in NumPy `ndim` is used instead.
- """
- # 2014-04-12, 1.9
- warnings.warn(
- "`rank` is deprecated; use the `ndim` attribute or function instead. "
- "To find the rank of a matrix see `numpy.linalg.matrix_rank`.",
- VisibleDeprecationWarning, stacklevel=3)
- return ndim(a)
diff --git a/numpy/core/include/numpy/ndarraytypes.h b/numpy/core/include/numpy/ndarraytypes.h
index 1221aeece..ad98d562b 100644
--- a/numpy/core/include/numpy/ndarraytypes.h
+++ b/numpy/core/include/numpy/ndarraytypes.h
@@ -1095,7 +1095,8 @@ typedef struct PyArrayIterObject_tag PyArrayIterObject;
* type of the function which translates a set of coordinates to a
* pointer to the data
*/
-typedef char* (*npy_iter_get_dataptr_t)(PyArrayIterObject* iter, npy_intp*);
+typedef char* (*npy_iter_get_dataptr_t)(
+ PyArrayIterObject* iter, const npy_intp*);
struct PyArrayIterObject_tag {
PyObject_HEAD
@@ -1695,7 +1696,8 @@ PyArray_CLEARFLAGS(PyArrayObject *arr, int flags)
#define PyDataType_ISOBJECT(obj) PyTypeNum_ISOBJECT(((PyArray_Descr*)(obj))->type_num)
#define PyDataType_HASFIELDS(obj) (((PyArray_Descr *)(obj))->names != NULL)
#define PyDataType_HASSUBARRAY(dtype) ((dtype)->subarray != NULL)
-#define PyDataType_ISUNSIZED(dtype) ((dtype)->elsize == 0)
+#define PyDataType_ISUNSIZED(dtype) ((dtype)->elsize == 0 && \
+ !PyDataType_HASFIELDS(dtype))
#define PyDataType_MAKEUNSIZED(dtype) ((dtype)->elsize = 0)
#define PyArray_ISBOOL(obj) PyTypeNum_ISBOOL(PyArray_TYPE(obj))
diff --git a/numpy/core/include/numpy/npy_common.h b/numpy/core/include/numpy/npy_common.h
index 108c0a202..27b83f7b5 100644
--- a/numpy/core/include/numpy/npy_common.h
+++ b/numpy/core/include/numpy/npy_common.h
@@ -44,10 +44,14 @@
#else
#define NPY_GCC_TARGET_AVX
#endif
+
+#if defined HAVE_ATTRIBUTE_TARGET_AVX2_WITH_INTRINSICS
+#define HAVE_ATTRIBUTE_TARGET_FMA
+#define NPY_GCC_TARGET_FMA __attribute__((target("avx2,fma")))
+#endif
+
#if defined HAVE_ATTRIBUTE_TARGET_AVX2 && defined HAVE_LINK_AVX2
#define NPY_GCC_TARGET_AVX2 __attribute__((target("avx2")))
-#elif defined HAVE_ATTRIBUTE_TARGET_AVX2_WITH_INTRINSICS
-#define NPY_GCC_TARGET_AVX2 __attribute__((target("avx2")))
#else
#define NPY_GCC_TARGET_AVX2
#endif
diff --git a/numpy/core/include/numpy/npy_math.h b/numpy/core/include/numpy/npy_math.h
index dfb8ff526..69e690f28 100644
--- a/numpy/core/include/numpy/npy_math.h
+++ b/numpy/core/include/numpy/npy_math.h
@@ -113,37 +113,100 @@ NPY_INLINE static float __npy_nzerof(void)
#define NPY_SQRT2l 1.414213562373095048801688724209698079L /* sqrt(2) */
#define NPY_SQRT1_2l 0.707106781186547524400844362104849039L /* 1/sqrt(2) */
-/*
- * Constants used in vector implementation of exp(x)
+/*
+ * Constants used in vector implementation of exp(x)
*/
#define NPY_RINT_CVT_MAGICf 0x1.800000p+23f
#define NPY_CODY_WAITE_LOGE_2_HIGHf -6.93145752e-1f
#define NPY_CODY_WAITE_LOGE_2_LOWf -1.42860677e-6f
-#define NPY_COEFF_P0_EXPf 9.999999999980870924916e-01f
-#define NPY_COEFF_P1_EXPf 7.257664613233124478488e-01f
-#define NPY_COEFF_P2_EXPf 2.473615434895520810817e-01f
-#define NPY_COEFF_P3_EXPf 5.114512081637298353406e-02f
-#define NPY_COEFF_P4_EXPf 6.757896990527504603057e-03f
-#define NPY_COEFF_P5_EXPf 5.082762527590693718096e-04f
-#define NPY_COEFF_Q0_EXPf 1.000000000000000000000e+00f
-#define NPY_COEFF_Q1_EXPf -2.742335390411667452936e-01f
-#define NPY_COEFF_Q2_EXPf 2.159509375685829852307e-02f
-
-/*
- * Constants used in vector implementation of log(x)
+#define NPY_COEFF_P0_EXPf 9.999999999980870924916e-01f
+#define NPY_COEFF_P1_EXPf 7.257664613233124478488e-01f
+#define NPY_COEFF_P2_EXPf 2.473615434895520810817e-01f
+#define NPY_COEFF_P3_EXPf 5.114512081637298353406e-02f
+#define NPY_COEFF_P4_EXPf 6.757896990527504603057e-03f
+#define NPY_COEFF_P5_EXPf 5.082762527590693718096e-04f
+#define NPY_COEFF_Q0_EXPf 1.000000000000000000000e+00f
+#define NPY_COEFF_Q1_EXPf -2.742335390411667452936e-01f
+#define NPY_COEFF_Q2_EXPf 2.159509375685829852307e-02f
+
+/*
+ * Constants used in vector implementation of log(x)
+ */
+#define NPY_COEFF_P0_LOGf 0.000000000000000000000e+00f
+#define NPY_COEFF_P1_LOGf 9.999999999999998702752e-01f
+#define NPY_COEFF_P2_LOGf 2.112677543073053063722e+00f
+#define NPY_COEFF_P3_LOGf 1.480000633576506585156e+00f
+#define NPY_COEFF_P4_LOGf 3.808837741388407920751e-01f
+#define NPY_COEFF_P5_LOGf 2.589979117907922693523e-02f
+#define NPY_COEFF_Q0_LOGf 1.000000000000000000000e+00f
+#define NPY_COEFF_Q1_LOGf 2.612677543073109236779e+00f
+#define NPY_COEFF_Q2_LOGf 2.453006071784736363091e+00f
+#define NPY_COEFF_Q3_LOGf 9.864942958519418960339e-01f
+#define NPY_COEFF_Q4_LOGf 1.546476374983906719538e-01f
+#define NPY_COEFF_Q5_LOGf 5.875095403124574342950e-03f
+/*
+ * Constants used in vector implementation of sinf/cosf(x)
+ */
+#define NPY_TWO_O_PIf 0x1.45f306p-1f
+#define NPY_CODY_WAITE_PI_O_2_HIGHf -0x1.921fb0p+00f
+#define NPY_CODY_WAITE_PI_O_2_MEDf -0x1.5110b4p-22f
+#define NPY_CODY_WAITE_PI_O_2_LOWf -0x1.846988p-48f
+#define NPY_COEFF_INVF0_COSINEf 0x1.000000p+00f
+#define NPY_COEFF_INVF2_COSINEf -0x1.000000p-01f
+#define NPY_COEFF_INVF4_COSINEf 0x1.55553cp-05f
+#define NPY_COEFF_INVF6_COSINEf -0x1.6c06dcp-10f
+#define NPY_COEFF_INVF8_COSINEf 0x1.98e616p-16f
+#define NPY_COEFF_INVF3_SINEf -0x1.555556p-03f
+#define NPY_COEFF_INVF5_SINEf 0x1.11119ap-07f
+#define NPY_COEFF_INVF7_SINEf -0x1.a06bbap-13f
+#define NPY_COEFF_INVF9_SINEf 0x1.7d3bbcp-19f
+/*
+ * Integer functions.
+ */
+NPY_INPLACE npy_uint npy_gcdu(npy_uint a, npy_uint b);
+NPY_INPLACE npy_uint npy_lcmu(npy_uint a, npy_uint b);
+NPY_INPLACE npy_ulong npy_gcdul(npy_ulong a, npy_ulong b);
+NPY_INPLACE npy_ulong npy_lcmul(npy_ulong a, npy_ulong b);
+NPY_INPLACE npy_ulonglong npy_gcdull(npy_ulonglong a, npy_ulonglong b);
+NPY_INPLACE npy_ulonglong npy_lcmull(npy_ulonglong a, npy_ulonglong b);
+
+NPY_INPLACE npy_int npy_gcd(npy_int a, npy_int b);
+NPY_INPLACE npy_int npy_lcm(npy_int a, npy_int b);
+NPY_INPLACE npy_long npy_gcdl(npy_long a, npy_long b);
+NPY_INPLACE npy_long npy_lcml(npy_long a, npy_long b);
+NPY_INPLACE npy_longlong npy_gcdll(npy_longlong a, npy_longlong b);
+NPY_INPLACE npy_longlong npy_lcmll(npy_longlong a, npy_longlong b);
+
+NPY_INPLACE npy_ubyte npy_rshiftuhh(npy_ubyte a, npy_ubyte b);
+NPY_INPLACE npy_ubyte npy_lshiftuhh(npy_ubyte a, npy_ubyte b);
+NPY_INPLACE npy_ushort npy_rshiftuh(npy_ushort a, npy_ushort b);
+NPY_INPLACE npy_ushort npy_lshiftuh(npy_ushort a, npy_ushort b);
+NPY_INPLACE npy_uint npy_rshiftu(npy_uint a, npy_uint b);
+NPY_INPLACE npy_uint npy_lshiftu(npy_uint a, npy_uint b);
+NPY_INPLACE npy_ulong npy_rshiftul(npy_ulong a, npy_ulong b);
+NPY_INPLACE npy_ulong npy_lshiftul(npy_ulong a, npy_ulong b);
+NPY_INPLACE npy_ulonglong npy_rshiftull(npy_ulonglong a, npy_ulonglong b);
+NPY_INPLACE npy_ulonglong npy_lshiftull(npy_ulonglong a, npy_ulonglong b);
+
+NPY_INPLACE npy_byte npy_rshifthh(npy_byte a, npy_byte b);
+NPY_INPLACE npy_byte npy_lshifthh(npy_byte a, npy_byte b);
+NPY_INPLACE npy_short npy_rshifth(npy_short a, npy_short b);
+NPY_INPLACE npy_short npy_lshifth(npy_short a, npy_short b);
+NPY_INPLACE npy_int npy_rshift(npy_int a, npy_int b);
+NPY_INPLACE npy_int npy_lshift(npy_int a, npy_int b);
+NPY_INPLACE npy_long npy_rshiftl(npy_long a, npy_long b);
+NPY_INPLACE npy_long npy_lshiftl(npy_long a, npy_long b);
+NPY_INPLACE npy_longlong npy_rshiftll(npy_longlong a, npy_longlong b);
+NPY_INPLACE npy_longlong npy_lshiftll(npy_longlong a, npy_longlong b);
+
+/*
+ * avx function has a common API for both sin & cos. This enum is used to
+ * distinguish between the two
*/
-#define NPY_COEFF_P0_LOGf 0.000000000000000000000e+00f
-#define NPY_COEFF_P1_LOGf 9.999999999999998702752e-01f
-#define NPY_COEFF_P2_LOGf 2.112677543073053063722e+00f
-#define NPY_COEFF_P3_LOGf 1.480000633576506585156e+00f
-#define NPY_COEFF_P4_LOGf 3.808837741388407920751e-01f
-#define NPY_COEFF_P5_LOGf 2.589979117907922693523e-02f
-#define NPY_COEFF_Q0_LOGf 1.000000000000000000000e+00f
-#define NPY_COEFF_Q1_LOGf 2.612677543073109236779e+00f
-#define NPY_COEFF_Q2_LOGf 2.453006071784736363091e+00f
-#define NPY_COEFF_Q3_LOGf 9.864942958519418960339e-01f
-#define NPY_COEFF_Q4_LOGf 1.546476374983906719538e-01f
-#define NPY_COEFF_Q5_LOGf 5.875095403124574342950e-03f
+typedef enum {
+ npy_compute_sin,
+ npy_compute_cos
+} NPY_TRIG_OP;
/*
* C99 double math funcs
diff --git a/numpy/core/info.py b/numpy/core/info.py
deleted file mode 100644
index c6f7bbcf2..000000000
--- a/numpy/core/info.py
+++ /dev/null
@@ -1,87 +0,0 @@
-"""Defines a multi-dimensional array and useful procedures for Numerical computation.
-
-Functions
-
-- array - NumPy Array construction
-- zeros - Return an array of all zeros
-- empty - Return an uninitialized array
-- shape - Return shape of sequence or array
-- rank - Return number of dimensions
-- size - Return number of elements in entire array or a
- certain dimension
-- fromstring - Construct array from (byte) string
-- take - Select sub-arrays using sequence of indices
-- put - Set sub-arrays using sequence of 1-D indices
-- putmask - Set portion of arrays using a mask
-- reshape - Return array with new shape
-- repeat - Repeat elements of array
-- choose - Construct new array from indexed array tuple
-- correlate - Correlate two 1-d arrays
-- searchsorted - Search for element in 1-d array
-- sum - Total sum over a specified dimension
-- average - Average, possibly weighted, over axis or array.
-- cumsum - Cumulative sum over a specified dimension
-- product - Total product over a specified dimension
-- cumproduct - Cumulative product over a specified dimension
-- alltrue - Logical and over an entire axis
-- sometrue - Logical or over an entire axis
-- allclose - Tests if sequences are essentially equal
-
-More Functions:
-
-- arange - Return regularly spaced array
-- asarray - Guarantee NumPy array
-- convolve - Convolve two 1-d arrays
-- swapaxes - Exchange axes
-- concatenate - Join arrays together
-- transpose - Permute axes
-- sort - Sort elements of array
-- argsort - Indices of sorted array
-- argmax - Index of largest value
-- argmin - Index of smallest value
-- inner - Innerproduct of two arrays
-- dot - Dot product (matrix multiplication)
-- outer - Outerproduct of two arrays
-- resize - Return array with arbitrary new shape
-- indices - Tuple of indices
-- fromfunction - Construct array from universal function
-- diagonal - Return diagonal array
-- trace - Trace of array
-- dump - Dump array to file object (pickle)
-- dumps - Return pickled string representing data
-- load - Return array stored in file object
-- loads - Return array from pickled string
-- ravel - Return array as 1-D
-- nonzero - Indices of nonzero elements for 1-D array
-- shape - Shape of array
-- where - Construct array from binary result
-- compress - Elements of array where condition is true
-- clip - Clip array between two values
-- ones - Array of all ones
-- identity - 2-D identity array (matrix)
-
-(Universal) Math Functions
-
- add logical_or exp
- subtract logical_xor log
- multiply logical_not log10
- divide maximum sin
- divide_safe minimum sinh
- conjugate bitwise_and sqrt
- power bitwise_or tan
- absolute bitwise_xor tanh
- negative invert ceil
- greater left_shift fabs
- greater_equal right_shift floor
- less arccos arctan2
- less_equal arcsin fmod
- equal arctan hypot
- not_equal cos around
- logical_and cosh sign
- arccosh arcsinh arctanh
-
-"""
-from __future__ import division, absolute_import, print_function
-
-depends = ['testing']
-global_symbols = ['*']
diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py
index ea2ef900e..c395b1348 100644
--- a/numpy/core/numeric.py
+++ b/numpy/core/numeric.py
@@ -26,6 +26,7 @@ if sys.version_info[0] < 3:
from . import overrides
from . import umath
+from . import shape_base
from .overrides import set_module
from .umath import (multiply, invert, sin, PINF, NAN)
from . import numerictypes
@@ -48,14 +49,6 @@ array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
-def loads(*args, **kwargs):
- # NumPy 1.15.0, 2017-12-10
- warnings.warn(
- "np.core.numeric.loads is deprecated, use pickle.loads instead",
- DeprecationWarning, stacklevel=2)
- return pickle.loads(*args, **kwargs)
-
-
__all__ = [
'newaxis', 'ndarray', 'flatiter', 'nditer', 'nested_iters', 'ufunc',
'arange', 'array', 'zeros', 'count_nonzero', 'empty', 'broadcast', 'dtype',
@@ -66,7 +59,7 @@ __all__ = [
'correlate', 'convolve', 'inner', 'dot', 'outer', 'vdot', 'roll',
'rollaxis', 'moveaxis', 'cross', 'tensordot', 'little_endian',
'fromiter', 'array_equal', 'array_equiv', 'indices', 'fromfunction',
- 'isclose', 'load', 'loads', 'isscalar', 'binary_repr', 'base_repr', 'ones',
+ 'isclose', 'isscalar', 'binary_repr', 'base_repr', 'ones',
'identity', 'allclose', 'compare_chararrays', 'putmask',
'flatnonzero', 'Inf', 'inf', 'infty', 'Infinity', 'nan', 'NaN',
'False_', 'True_', 'bitwise_not', 'CLIP', 'RAISE', 'WRAP', 'MAXDIMS',
@@ -553,8 +546,10 @@ def argwhere(a):
Returns
-------
- index_array : ndarray
+ index_array : (N, a.ndim) ndarray
Indices of elements that are non-zero. Indices are grouped by element.
+ This array will have shape ``(N, a.ndim)`` where ``N`` is the number of
+ non-zero items.
See Also
--------
@@ -562,7 +557,8 @@ def argwhere(a):
Notes
-----
- ``np.argwhere(a)`` is the same as ``np.transpose(np.nonzero(a))``.
+ ``np.argwhere(a)`` is almost the same as ``np.transpose(np.nonzero(a))``,
+ but produces a result of the correct shape for a 0D array.
The output of ``argwhere`` is not suitable for indexing arrays.
For this purpose use ``nonzero(a)`` instead.
@@ -580,6 +576,11 @@ def argwhere(a):
[1, 2]])
"""
+ # nonzero does not behave well on 0d, so promote to 1d
+ if np.ndim(a) == 0:
+ a = shape_base.atleast_1d(a)
+ # then remove the added dimension
+ return argwhere(a)[:,:0]
return transpose(nonzero(a))
@@ -1935,6 +1936,10 @@ def binary_repr(num, width=None):
"will raise an error in the future.", DeprecationWarning,
stacklevel=3)
+ # Ensure that num is a Python integer to avoid overflow or unwanted
+ # casts to floating point.
+ num = operator.index(num)
+
if num == 0:
return '0' * (width or 1)
@@ -2024,30 +2029,6 @@ def base_repr(number, base=2, padding=0):
return ''.join(reversed(res or '0'))
-def load(file):
- """
- Wrapper around cPickle.load which accepts either a file-like object or
- a filename.
-
- Note that the NumPy binary format is not based on pickle/cPickle anymore.
- For details on the preferred way of loading and saving files, see `load`
- and `save`.
-
- See Also
- --------
- load, save
-
- """
- # NumPy 1.15.0, 2017-12-10
- warnings.warn(
- "np.core.numeric.load is deprecated, use pickle.load instead",
- DeprecationWarning, stacklevel=2)
- if isinstance(file, type("")):
- with open(file, "rb") as file_pointer:
- return pickle.load(file_pointer)
- return pickle.load(file)
-
-
# These are all essentially abbreviations
# These might wind up in a special abbreviations module
@@ -2124,7 +2105,7 @@ def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
The absolute tolerance parameter (see Notes).
equal_nan : bool
Whether to compare NaN's as equal. If True, NaN's in `a` will be
- considered equal to NaN's in `b` in the output array.
+ considered equal to NaN's in `b`.
.. versionadded:: 1.10.0
diff --git a/numpy/core/overrides.py b/numpy/core/overrides.py
index 04a5a995f..55c7bd1ea 100644
--- a/numpy/core/overrides.py
+++ b/numpy/core/overrides.py
@@ -109,6 +109,18 @@ def set_module(module):
return decorator
+
+# Call textwrap.dedent here instead of in the function so as to avoid
+# calling dedent multiple times on the same text
+_wrapped_func_source = textwrap.dedent("""
+ @functools.wraps(implementation)
+ def {name}(*args, **kwargs):
+ relevant_args = dispatcher(*args, **kwargs)
+ return implement_array_function(
+ implementation, {name}, relevant_args, args, kwargs)
+ """)
+
+
def array_function_dispatch(dispatcher, module=None, verify=True,
docs_from_dispatcher=False):
"""Decorator for adding dispatch with the __array_function__ protocol.
@@ -163,13 +175,7 @@ def array_function_dispatch(dispatcher, module=None, verify=True,
# more interpettable name. Otherwise, the original function does not
# show up at all in many cases, e.g., if it's written in C or if the
# dispatcher gets an invalid keyword argument.
- source = textwrap.dedent("""
- @functools.wraps(implementation)
- def {name}(*args, **kwargs):
- relevant_args = dispatcher(*args, **kwargs)
- return implement_array_function(
- implementation, {name}, relevant_args, args, kwargs)
- """).format(name=implementation.__name__)
+ source = _wrapped_func_source.format(name=implementation.__name__)
source_object = compile(
source, filename='<__array_function__ internals>', mode='exec')
diff --git a/numpy/core/records.py b/numpy/core/records.py
index 0576005e7..a1439f9df 100644
--- a/numpy/core/records.py
+++ b/numpy/core/records.py
@@ -268,8 +268,8 @@ class record(nt.void):
except AttributeError:
#happens if field is Object type
return obj
- if dt.fields:
- return obj.view((self.__class__, obj.dtype.fields))
+ if dt.names is not None:
+ return obj.view((self.__class__, obj.dtype))
return obj
else:
raise AttributeError("'record' object has no "
@@ -293,8 +293,8 @@ class record(nt.void):
obj = nt.void.__getitem__(self, indx)
# copy behavior of record.__getattribute__,
- if isinstance(obj, nt.void) and obj.dtype.fields:
- return obj.view((self.__class__, obj.dtype.fields))
+ if isinstance(obj, nt.void) and obj.dtype.names is not None:
+ return obj.view((self.__class__, obj.dtype))
else:
# return a single element
return obj
@@ -444,7 +444,7 @@ class recarray(ndarray):
return self
def __array_finalize__(self, obj):
- if self.dtype.type is not record and self.dtype.fields:
+ if self.dtype.type is not record and self.dtype.names is not None:
# if self.dtype is not np.record, invoke __setattr__ which will
# convert it to a record if it is a void dtype.
self.dtype = self.dtype
@@ -472,7 +472,7 @@ class recarray(ndarray):
# with void type convert it to the same dtype.type (eg to preserve
# numpy.record type if present), since nested structured fields do not
# inherit type. Don't do this for non-void structures though.
- if obj.dtype.fields:
+ if obj.dtype.names is not None:
if issubclass(obj.dtype.type, nt.void):
return obj.view(dtype=(self.dtype.type, obj.dtype))
return obj
@@ -487,7 +487,7 @@ class recarray(ndarray):
# Automatically convert (void) structured types to records
# (but not non-void structures, subarrays, or non-structured voids)
- if attr == 'dtype' and issubclass(val.type, nt.void) and val.fields:
+ if attr == 'dtype' and issubclass(val.type, nt.void) and val.names is not None:
val = sb.dtype((record, val))
newattr = attr not in self.__dict__
@@ -521,7 +521,7 @@ class recarray(ndarray):
# copy behavior of getattr, except that here
# we might also be returning a single element
if isinstance(obj, ndarray):
- if obj.dtype.fields:
+ if obj.dtype.names is not None:
obj = obj.view(type(self))
if issubclass(obj.dtype.type, nt.void):
return obj.view(dtype=(self.dtype.type, obj.dtype))
@@ -577,7 +577,7 @@ class recarray(ndarray):
if val is None:
obj = self.getfield(*res)
- if obj.dtype.fields:
+ if obj.dtype.names is not None:
return obj
return obj.view(ndarray)
else:
diff --git a/numpy/core/setup.py b/numpy/core/setup.py
index 338502791..5f2f4a7b2 100644
--- a/numpy/core/setup.py
+++ b/numpy/core/setup.py
@@ -463,8 +463,14 @@ def configuration(parent_package='',top_path=None):
rep = check_long_double_representation(config_cmd)
moredefs.append(('HAVE_LDOUBLE_%s' % rep, 1))
+ if check_for_right_shift_internal_compiler_error(config_cmd):
+ moredefs.append('NPY_DO_NOT_OPTIMIZE_LONG_right_shift')
+ moredefs.append('NPY_DO_NOT_OPTIMIZE_ULONG_right_shift')
+ moredefs.append('NPY_DO_NOT_OPTIMIZE_LONGLONG_right_shift')
+ moredefs.append('NPY_DO_NOT_OPTIMIZE_ULONGLONG_right_shift')
+
# Py3K check
- if sys.version_info[0] == 3:
+ if sys.version_info[0] >= 3:
moredefs.append(('NPY_PY3K', 1))
# Generate the config.h file from moredefs
@@ -491,10 +497,10 @@ def configuration(parent_package='',top_path=None):
#endif
"""))
- print('File:', target)
+ log.info('File: %s' % target)
with open(target) as target_f:
- print(target_f.read())
- print('EOF')
+ log.info(target_f.read())
+ log.info('EOF')
else:
mathlibs = []
with open(target) as target_f:
@@ -581,10 +587,10 @@ def configuration(parent_package='',top_path=None):
"""))
# Dump the numpyconfig.h header to stdout
- print('File: %s' % target)
+ log.info('File: %s' % target)
with open(target) as target_f:
- print(target_f.read())
- print('EOF')
+ log.info(target_f.read())
+ log.info('EOF')
config.add_data_files((header_dir, target))
return target
@@ -633,23 +639,6 @@ def configuration(parent_package='',top_path=None):
]
#######################################################################
- # dummy module #
- #######################################################################
-
- # npymath needs the config.h and numpyconfig.h files to be generated, but
- # build_clib cannot handle generate_config_h and generate_numpyconfig_h
- # (don't ask). Because clib are generated before extensions, we have to
- # explicitly add an extension which has generate_config_h and
- # generate_numpyconfig_h as sources *before* adding npymath.
-
- config.add_extension('_dummy',
- sources=[join('src', 'dummymodule.c'),
- generate_config_h,
- generate_numpyconfig_h,
- generate_numpy_api]
- )
-
- #######################################################################
# npymath library #
#######################################################################
diff --git a/numpy/core/setup_common.py b/numpy/core/setup_common.py
index 307fab334..84b78b585 100644
--- a/numpy/core/setup_common.py
+++ b/numpy/core/setup_common.py
@@ -5,6 +5,7 @@ import sys
import warnings
import copy
import binascii
+import textwrap
from numpy.distutils.misc_util import mingw32
@@ -14,7 +15,7 @@ from numpy.distutils.misc_util import mingw32
#-------------------
# How to change C_API_VERSION ?
# - increase C_API_VERSION value
-# - record the hash for the new C API with the script cversions.py
+# - record the hash for the new C API with the cversions.py script
# and add the hash to cversions.txt
# The hash values are used to remind developers when the C API number was not
# updated - generates a MismatchCAPIWarning warning which is turned into an
@@ -88,14 +89,13 @@ def check_api_version(apiversion, codegen_dir):
# codegen_dir have been updated without the API version being
# updated. Any modification in those .txt files should be reflected
# in the api and eventually abi versions.
- # To compute the checksum of the current API, use
- # code_generators/cversions.py script
+ # To compute the checksum of the current API, use numpy/core/cversions.py
if not curapi_hash == api_hash:
msg = ("API mismatch detected, the C API version "
"numbers have to be updated. Current C api version is %d, "
- "with checksum %s, but recorded checksum for C API version %d in "
- "codegen_dir/cversions.txt is %s. If functions were added in the "
- "C API, you have to update C_API_VERSION in %s."
+ "with checksum %s, but recorded checksum for C API version %d "
+ "in core/codegen_dir/cversions.txt is %s. If functions were "
+ "added in the C API, you have to update C_API_VERSION in %s."
)
warnings.warn(msg % (apiversion, curapi_hash, apiversion, api_hash,
__file__),
@@ -179,9 +179,10 @@ OPTIONAL_FUNCTION_ATTRIBUTES = [('__attribute__((optimize("unroll-loops")))',
# gcc 4.8.4 support attributes but not with intrisics
# tested via "#include<%s> int %s %s(void *){code; return 0;};" % (header, attribute, name, code)
# function name will be converted to HAVE_<upper-case-name> preprocessor macro
-OPTIONAL_FUNCTION_ATTRIBUTES_WITH_INTRINSICS = [('__attribute__((target("avx2")))',
+OPTIONAL_FUNCTION_ATTRIBUTES_WITH_INTRINSICS = [('__attribute__((target("avx2,fma")))',
'attribute_target_avx2_with_intrinsics',
- '__m256 temp = _mm256_set1_ps(1.0)',
+ '__m256 temp = _mm256_set1_ps(1.0); temp = \
+ _mm256_fmadd_ps(temp, temp, temp)',
'immintrin.h'),
('__attribute__((target("avx512f")))',
'attribute_target_avx512f_with_intrinsics',
@@ -415,3 +416,41 @@ def long_double_representation(lines):
else:
# We never detected the after_sequence
raise ValueError("Could not lock sequences (%s)" % saw)
+
+
+def check_for_right_shift_internal_compiler_error(cmd):
+ """
+ On our arm CI, this fails with an internal compilation error
+
+ The failure looks like the following, and can be reproduced on ARM64 GCC 5.4:
+
+ <source>: In function 'right_shift':
+ <source>:4:20: internal compiler error: in expand_shift_1, at expmed.c:2349
+ ip1[i] = ip1[i] >> in2;
+ ^
+ Please submit a full bug report,
+ with preprocessed source if appropriate.
+ See <http://gcc.gnu.org/bugs.html> for instructions.
+ Compiler returned: 1
+
+ This function returns True if this compiler bug is present, and we need to
+ turn off optimization for the function
+ """
+ cmd._check_compiler()
+ has_optimize = cmd.try_compile(textwrap.dedent("""\
+ __attribute__((optimize("O3"))) void right_shift() {}
+ """), None, None)
+ if not has_optimize:
+ return False
+
+ no_err = cmd.try_compile(textwrap.dedent("""\
+ typedef long the_type; /* fails also for unsigned and long long */
+ __attribute__((optimize("O3"))) void right_shift(the_type in2, the_type *ip1, int n) {
+ for (int i = 0; i < n; i++) {
+ if (in2 < (the_type)sizeof(the_type) * 8) {
+ ip1[i] = ip1[i] >> in2;
+ }
+ }
+ }
+ """), None, None)
+ return not no_err
diff --git a/numpy/core/shape_base.py b/numpy/core/shape_base.py
index 710f64827..d7e769e62 100644
--- a/numpy/core/shape_base.py
+++ b/numpy/core/shape_base.py
@@ -9,8 +9,9 @@ import warnings
from . import numeric as _nx
from . import overrides
-from .numeric import array, asanyarray, newaxis
+from ._asarray import array, asanyarray
from .multiarray import normalize_axis_index
+from . import fromnumeric as _from_nx
array_function_dispatch = functools.partial(
@@ -123,7 +124,7 @@ def atleast_2d(*arys):
if ary.ndim == 0:
result = ary.reshape(1, 1)
elif ary.ndim == 1:
- result = ary[newaxis, :]
+ result = ary[_nx.newaxis, :]
else:
result = ary
res.append(result)
@@ -193,9 +194,9 @@ def atleast_3d(*arys):
if ary.ndim == 0:
result = ary.reshape(1, 1, 1)
elif ary.ndim == 1:
- result = ary[newaxis, :, newaxis]
+ result = ary[_nx.newaxis, :, _nx.newaxis]
elif ary.ndim == 2:
- result = ary[:, :, newaxis]
+ result = ary[:, :, _nx.newaxis]
else:
result = ary
res.append(result)
@@ -435,9 +436,9 @@ def stack(arrays, axis=0, out=None):
# Internal functions to eliminate the overhead of repeated dispatch in one of
# the two possible paths inside np.block.
# Use getattr to protect against __array_function__ being disabled.
-_size = getattr(_nx.size, '__wrapped__', _nx.size)
-_ndim = getattr(_nx.ndim, '__wrapped__', _nx.ndim)
-_concatenate = getattr(_nx.concatenate, '__wrapped__', _nx.concatenate)
+_size = getattr(_from_nx.size, '__wrapped__', _from_nx.size)
+_ndim = getattr(_from_nx.ndim, '__wrapped__', _from_nx.ndim)
+_concatenate = getattr(_from_nx.concatenate, '__wrapped__', _from_nx.concatenate)
def _block_format_index(index):
diff --git a/numpy/core/src/common/npy_partition.h.src b/numpy/core/src/common/npy_partition.h.src
index a22cf911c..97dc2536b 100644
--- a/numpy/core/src/common/npy_partition.h.src
+++ b/numpy/core/src/common/npy_partition.h.src
@@ -113,9 +113,6 @@ get_argpartition_func(int type, NPY_SELECTKIND which)
npy_intp i;
npy_intp ntypes = ARRAY_SIZE(_part_map);
- if (which >= NPY_NSELECTS) {
- return NULL;
- }
for (i = 0; i < ntypes; i++) {
if (type == _part_map[i].typenum) {
return _part_map[i].argpart[which];
diff --git a/numpy/core/src/multiarray/_multiarray_tests.c.src b/numpy/core/src/multiarray/_multiarray_tests.c.src
index 1365e87bb..9e6083e2a 100644
--- a/numpy/core/src/multiarray/_multiarray_tests.c.src
+++ b/numpy/core/src/multiarray/_multiarray_tests.c.src
@@ -593,6 +593,25 @@ fail:
return NULL;
}
+/*
+ * Helper to test fromstring of 0 terminated strings, as the C-API supports
+ * the -1 length identifier.
+ */
+static PyObject *
+fromstring_null_term_c_api(PyObject *dummy, PyObject *byte_obj)
+{
+ char *string;
+
+ string = PyBytes_AsString(byte_obj);
+ if (string == NULL) {
+ return NULL;
+ }
+
+ return PyArray_FromString(
+ string, -1, PyArray_DescrFromType(NPY_FLOAT64), -1, " ");
+}
+
+
/* check no elison for avoided increfs */
static PyObject *
incref_elide(PyObject *dummy, PyObject *args)
@@ -656,6 +675,43 @@ npy_updateifcopy_deprecation(PyObject* NPY_UNUSED(self), PyObject* args)
Py_RETURN_NONE;
}
+/* used to test PyArray_As1D usage emits not implemented error */
+static PyObject*
+npy_pyarrayas1d_deprecation(PyObject* NPY_UNUSED(self), PyObject* NPY_UNUSED(args))
+{
+ PyObject *op = Py_BuildValue("i", 42);
+ PyObject *result = op;
+ int dim = 4;
+ double arg[2] = {1, 2};
+ int temp = PyArray_As1D(&result, (char **)&arg, &dim, NPY_DOUBLE);
+ if (temp < 0) {
+ Py_DECREF(op);
+ return NULL;
+ }
+ /* op != result */
+ Py_DECREF(op);
+ return result;
+}
+
+/* used to test PyArray_As2D usage emits not implemented error */
+static PyObject*
+npy_pyarrayas2d_deprecation(PyObject* NPY_UNUSED(self), PyObject* NPY_UNUSED(args))
+{
+ PyObject *op = Py_BuildValue("i", 42);
+ PyObject *result = op;
+ int dim1 = 4;
+ int dim2 = 6;
+ double arg[2][2] = {{1, 2}, {3, 4}};
+ int temp = PyArray_As2D(&result, (char ***)&arg, &dim1, &dim2, NPY_DOUBLE);
+ if (temp < 0) {
+ Py_DECREF(op);
+ return NULL;
+ }
+ /* op != result */
+ Py_DECREF(op);
+ return result;
+}
+
/* used to create array with WRITEBACKIFCOPY flag */
static PyObject*
npy_create_writebackifcopy(PyObject* NPY_UNUSED(self), PyObject* args)
@@ -1927,6 +1983,9 @@ static PyMethodDef Multiarray_TestsMethods[] = {
{"test_inplace_increment",
inplace_increment,
METH_VARARGS, NULL},
+ {"fromstring_null_term_c_api",
+ fromstring_null_term_c_api,
+ METH_O, NULL},
{"incref_elide",
incref_elide,
METH_VARARGS, NULL},
@@ -1939,6 +1998,12 @@ static PyMethodDef Multiarray_TestsMethods[] = {
{"npy_updateifcopy_deprecation",
npy_updateifcopy_deprecation,
METH_O, NULL},
+ {"npy_pyarrayas1d_deprecation",
+ npy_pyarrayas1d_deprecation,
+ METH_NOARGS, NULL},
+ {"npy_pyarrayas2d_deprecation",
+ npy_pyarrayas2d_deprecation,
+ METH_NOARGS, NULL},
{"npy_create_writebackifcopy",
npy_create_writebackifcopy,
METH_O, NULL},
diff --git a/numpy/core/src/multiarray/alloc.c b/numpy/core/src/multiarray/alloc.c
index addc9f006..a7f34cbe5 100644
--- a/numpy/core/src/multiarray/alloc.c
+++ b/numpy/core/src/multiarray/alloc.c
@@ -25,10 +25,14 @@
#include <assert.h>
-#ifdef HAVE_SYS_MMAN_H
+#ifdef NPY_OS_LINUX
#include <sys/mman.h>
-#if defined MADV_HUGEPAGE && defined HAVE_MADVISE
-#define HAVE_MADV_HUGEPAGE
+#ifndef MADV_HUGEPAGE
+/*
+ * Use code 14 (MADV_HUGEPAGE) if it isn't defined. This gives a chance of
+ * enabling huge pages even if built with linux kernel < 2.6.38
+ */
+#define MADV_HUGEPAGE 14
#endif
#endif
@@ -74,11 +78,15 @@ _npy_alloc_cache(npy_uintp nelem, npy_uintp esz, npy_uint msz,
#ifdef _PyPyGC_AddMemoryPressure
_PyPyPyGC_AddMemoryPressure(nelem * esz);
#endif
-#ifdef HAVE_MADV_HUGEPAGE
+#ifdef NPY_OS_LINUX
/* allow kernel allocating huge pages for large arrays */
if (NPY_UNLIKELY(nelem * esz >= ((1u<<22u)))) {
npy_uintp offset = 4096u - (npy_uintp)p % (4096u);
npy_uintp length = nelem * esz - offset;
+ /**
+ * Intentionally not checking for errors that may be returned by
+ * older kernel versions; optimistically tries enabling huge pages.
+ */
madvise((void*)((npy_uintp)p + offset), length, MADV_HUGEPAGE);
}
#endif
diff --git a/numpy/core/src/multiarray/arrayobject.c b/numpy/core/src/multiarray/arrayobject.c
index bbb736fd0..4e229e321 100644
--- a/numpy/core/src/multiarray/arrayobject.c
+++ b/numpy/core/src/multiarray/arrayobject.c
@@ -462,7 +462,7 @@ WARN_IN_DEALLOC(PyObject* warning, const char * msg) {
PyErr_WriteUnraisable(Py_None);
}
}
-};
+}
/* array object functions */
@@ -483,10 +483,11 @@ array_dealloc(PyArrayObject *self)
char const * msg = "WRITEBACKIFCOPY detected in array_dealloc. "
" Required call to PyArray_ResolveWritebackIfCopy or "
"PyArray_DiscardWritebackIfCopy is missing.";
- Py_INCREF(self); /* hold on to self in next call since if
- * refcount == 0 it will recurse back into
- *array_dealloc
- */
+ /*
+ * prevent reaching 0 twice and thus recursing into dealloc.
+ * Increasing sys.gettotalrefcount, but path should not be taken.
+ */
+ Py_INCREF(self);
WARN_IN_DEALLOC(PyExc_RuntimeWarning, msg);
retval = PyArray_ResolveWritebackIfCopy(self);
if (retval < 0)
@@ -500,10 +501,11 @@ array_dealloc(PyArrayObject *self)
char const * msg = "UPDATEIFCOPY detected in array_dealloc. "
" Required call to PyArray_ResolveWritebackIfCopy or "
"PyArray_DiscardWritebackIfCopy is missing";
- Py_INCREF(self); /* hold on to self in next call since if
- * refcount == 0 it will recurse back into
- *array_dealloc
- */
+ /*
+ * prevent reaching 0 twice and thus recursing into dealloc.
+ * Increasing sys.gettotalrefcount, but path should not be taken.
+ */
+ Py_INCREF(self);
/* 2017-Nov-10 1.14 */
WARN_IN_DEALLOC(PyExc_DeprecationWarning, msg);
retval = PyArray_ResolveWritebackIfCopy(self);
@@ -523,12 +525,7 @@ array_dealloc(PyArrayObject *self)
if ((fa->flags & NPY_ARRAY_OWNDATA) && fa->data) {
/* Free internal references if an Object array */
if (PyDataType_FLAGCHK(fa->descr, NPY_ITEM_REFCOUNT)) {
- Py_INCREF(self); /*hold on to self */
PyArray_XDECREF(self);
- /*
- * Don't need to DECREF -- because we are deleting
- * self already...
- */
}
npy_free_cache(fa->data, PyArray_NBYTES(self));
}
@@ -610,7 +607,7 @@ PyArray_DebugPrint(PyArrayObject *obj)
* TO BE REMOVED - NOT USED INTERNALLY.
*/
NPY_NO_EXPORT void
-PyArray_SetDatetimeParseFunction(PyObject *op)
+PyArray_SetDatetimeParseFunction(PyObject *NPY_UNUSED(op))
{
}
@@ -633,7 +630,7 @@ PyArray_CompareUCS4(npy_ucs4 *s1, npy_ucs4 *s2, size_t len)
/*NUMPY_API
*/
NPY_NO_EXPORT int
-PyArray_CompareString(char *s1, char *s2, size_t len)
+PyArray_CompareString(const char *s1, const char *s2, size_t len)
{
const unsigned char *c1 = (unsigned char *)s1;
const unsigned char *c2 = (unsigned char *)s2;
@@ -1203,15 +1200,28 @@ _void_compare(PyArrayObject *self, PyArrayObject *other, int cmp_op)
}
}
if (res == NULL && !PyErr_Occurred()) {
- PyErr_SetString(PyExc_ValueError, "No fields found.");
+ /* these dtypes had no fields. Use a MultiIter to broadcast them
+ * to an output array, and fill with True (for EQ)*/
+ PyArrayMultiIterObject *mit = (PyArrayMultiIterObject *)
+ PyArray_MultiIterNew(2, self, other);
+ if (mit == NULL) {
+ return NULL;
+ }
+
+ res = PyArray_NewFromDescr(&PyArray_Type,
+ PyArray_DescrFromType(NPY_BOOL),
+ mit->nd, mit->dimensions,
+ NULL, NULL, 0, NULL);
+ Py_DECREF(mit);
+ if (res) {
+ PyArray_FILLWBYTE((PyArrayObject *)res,
+ cmp_op == Py_EQ ? 1 : 0);
+ }
}
return res;
}
else {
- /*
- * compare as a string. Assumes self and
- * other have same descr->type
- */
+ /* compare as a string. Assumes self and other have same descr->type */
return _strings_richcompare(self, other, cmp_op, 0);
}
}
diff --git a/numpy/core/src/multiarray/compiled_base.c b/numpy/core/src/multiarray/compiled_base.c
index dc79bfa09..055d3e60f 100644
--- a/numpy/core/src/multiarray/compiled_base.c
+++ b/numpy/core/src/multiarray/compiled_base.c
@@ -367,6 +367,18 @@ arr_insert(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwdict)
#define LIKELY_IN_CACHE_SIZE 8
+#ifdef __INTEL_COMPILER
+#pragma intel optimization_level 0
+#endif
+static NPY_INLINE npy_intp
+_linear_search(const npy_double key, const npy_double *arr, const npy_intp len, const npy_intp i0)
+{
+ npy_intp i;
+
+ for (i = i0; i < len && key >= arr[i]; i++);
+ return i - 1;
+}
+
/** @brief find index of a sorted array such that arr[i] <= key < arr[i + 1].
*
* If an starting index guess is in-range, the array values around this
@@ -406,10 +418,7 @@ binary_search_with_guess(const npy_double key, const npy_double *arr,
* From above we know key >= arr[0] when we start.
*/
if (len <= 4) {
- npy_intp i;
-
- for (i = 1; i < len && key >= arr[i]; ++i);
- return i - 1;
+ return _linear_search(key, arr, len, 1);
}
if (guess > len - 3) {
@@ -933,6 +942,20 @@ ravel_multi_index_loop(int ravel_ndim, npy_intp *ravel_dims,
char invalid;
npy_intp j, m;
+ /*
+ * Check for 0-dimensional axes unless there is nothing to do.
+ * An empty array/shape cannot be indexed at all.
+ */
+ if (count != 0) {
+ for (i = 0; i < ravel_ndim; ++i) {
+ if (ravel_dims[i] == 0) {
+ PyErr_SetString(PyExc_ValueError,
+ "cannot unravel if shape has zero entries (is empty).");
+ return NPY_FAIL;
+ }
+ }
+ }
+
NPY_BEGIN_ALLOW_THREADS;
invalid = 0;
while (count--) {
diff --git a/numpy/core/src/multiarray/convert.c b/numpy/core/src/multiarray/convert.c
index 7db467308..aa4e40e66 100644
--- a/numpy/core/src/multiarray/convert.c
+++ b/numpy/core/src/multiarray/convert.c
@@ -543,35 +543,6 @@ PyArray_AssignZero(PyArrayObject *dst,
return retcode;
}
-/*
- * Fills an array with ones.
- *
- * dst: The destination array.
- * wheremask: If non-NULL, a boolean mask specifying where to set the values.
- *
- * Returns 0 on success, -1 on failure.
- */
-NPY_NO_EXPORT int
-PyArray_AssignOne(PyArrayObject *dst,
- PyArrayObject *wheremask)
-{
- npy_bool value;
- PyArray_Descr *bool_dtype;
- int retcode;
-
- /* Create a raw bool scalar with the value True */
- bool_dtype = PyArray_DescrFromType(NPY_BOOL);
- if (bool_dtype == NULL) {
- return -1;
- }
- value = 1;
-
- retcode = PyArray_AssignRawScalar(dst, bool_dtype, (char *)&value,
- wheremask, NPY_SAFE_CASTING);
-
- Py_DECREF(bool_dtype);
- return retcode;
-}
/*NUMPY_API
* Copy an array.
diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c
index 53efb1cea..c5199c015 100644
--- a/numpy/core/src/multiarray/ctors.c
+++ b/numpy/core/src/multiarray/ctors.c
@@ -40,9 +40,31 @@
* regards to the handling of text representations.
*/
+/*
+ * Scanning function for next element parsing and seperator skipping.
+ * These functions return:
+ * - 0 to indicate more data to read
+ * - -1 when reading stopped at the end of the string/file
+ * - -2 when reading stopped before the end was reached.
+ *
+ * The dtype specific parsing functions may set the python error state
+ * (they have to get the GIL first) additionally.
+ */
typedef int (*next_element)(void **, void *, PyArray_Descr *, void *);
typedef int (*skip_separator)(void **, const char *, void *);
+
+static npy_bool
+string_is_fully_read(char const* start, char const* end) {
+ if (end == NULL) {
+ return *start == '\0'; /* null terminated */
+ }
+ else {
+ return start >= end; /* fixed length */
+ }
+}
+
+
static int
fromstr_next_element(char **s, void *dptr, PyArray_Descr *dtype,
const char *end)
@@ -50,19 +72,23 @@ fromstr_next_element(char **s, void *dptr, PyArray_Descr *dtype,
char *e = *s;
int r = dtype->f->fromstr(*s, dptr, &e, dtype);
/*
- * fromstr always returns 0 for basic dtypes
- * s points to the end of the parsed string
- * if an error occurs s is not changed
+ * fromstr always returns 0 for basic dtypes; s points to the end of the
+ * parsed string. If s is not changed an error occurred or the end was
+ * reached.
*/
- if (*s == e) {
- /* Nothing read */
- return -1;
+ if (*s == e || r < 0) {
+ /* Nothing read, could be end of string or an error (or both) */
+ if (string_is_fully_read(*s, end)) {
+ return -1;
+ }
+ return -2;
}
*s = e;
if (end != NULL && *s > end) {
+ /* Stop the iteration if we read far enough */
return -1;
}
- return r;
+ return 0;
}
static int
@@ -75,9 +101,13 @@ fromfile_next_element(FILE **fp, void *dptr, PyArray_Descr *dtype,
if (r == 1) {
return 0;
}
- else {
+ else if (r == EOF) {
return -1;
}
+ else {
+ /* unable to read more, but EOF not reached indicating an error. */
+ return -2;
+ }
}
/*
@@ -143,9 +173,10 @@ fromstr_skip_separator(char **s, const char *sep, const char *end)
{
char *string = *s;
int result = 0;
+
while (1) {
char c = *string;
- if (c == '\0' || (end != NULL && string >= end)) {
+ if (string_is_fully_read(string, end)) {
result = -1;
break;
}
@@ -936,6 +967,39 @@ discover_dimensions(PyObject *obj, int *maxndim, npy_intp *d, int check_it,
return 0;
}
+static PyObject *
+raise_memory_error(int nd, npy_intp *dims, PyArray_Descr *descr)
+{
+ static PyObject *exc_type = NULL;
+
+ npy_cache_import(
+ "numpy.core._exceptions", "_ArrayMemoryError",
+ &exc_type);
+ if (exc_type == NULL) {
+ goto fail;
+ }
+
+ PyObject *shape = PyArray_IntTupleFromIntp(nd, dims);
+ if (shape == NULL) {
+ goto fail;
+ }
+
+ /* produce an error object */
+ PyObject *exc_value = PyTuple_Pack(2, shape, (PyObject *)descr);
+ Py_DECREF(shape);
+ if (exc_value == NULL){
+ goto fail;
+ }
+ PyErr_SetObject(exc_type, exc_value);
+ Py_DECREF(exc_value);
+ return NULL;
+
+fail:
+ /* we couldn't raise the formatted exception for some reason */
+ PyErr_WriteUnraisable(NULL);
+ return PyErr_NoMemory();
+}
+
/*
* Generic new array creation routine.
* Internal variant with calloc argument for PyArray_Zeros.
@@ -1113,30 +1177,7 @@ PyArray_NewFromDescr_int(
data = npy_alloc_cache(nbytes);
}
if (data == NULL) {
- static PyObject *exc_type = NULL;
-
- npy_cache_import(
- "numpy.core._exceptions", "_ArrayMemoryError",
- &exc_type);
- if (exc_type == NULL) {
- return NULL;
- }
-
- PyObject *shape = PyArray_IntTupleFromIntp(fa->nd,fa->dimensions);
- if (shape == NULL) {
- return NULL;
- }
-
- /* produce an error object */
- PyObject *exc_value = PyTuple_Pack(2, shape, descr);
- Py_DECREF(shape);
- if (exc_value == NULL){
- return NULL;
- }
- PyErr_SetObject(exc_type, exc_value);
- Py_DECREF(exc_value);
- return NULL;
-
+ return raise_memory_error(fa->nd, fa->dimensions, descr);
}
fa->flags |= NPY_ARRAY_OWNDATA;
@@ -1426,28 +1467,6 @@ _dtype_from_buffer_3118(PyObject *memoryview)
}
-/*
- * Call the python _is_from_ctypes
- */
-NPY_NO_EXPORT int
-_is_from_ctypes(PyObject *obj) {
- PyObject *ret_obj;
- static PyObject *py_func = NULL;
-
- npy_cache_import("numpy.core._internal", "_is_from_ctypes", &py_func);
-
- if (py_func == NULL) {
- return -1;
- }
- ret_obj = PyObject_CallFunctionObjArgs(py_func, obj, NULL);
- if (ret_obj == NULL) {
- return -1;
- }
-
- return PyObject_IsTrue(ret_obj);
-}
-
-
NPY_NO_EXPORT PyObject *
_array_from_buffer_3118(PyObject *memoryview)
{
@@ -1849,13 +1868,6 @@ PyArray_GetArrayParamsFromObject(PyObject *op,
*out_arr = NULL;
return 0;
}
- if (is_object && (requested_dtype != NULL) &&
- (requested_dtype->type_num != NPY_OBJECT)) {
- PyErr_SetString(PyExc_ValueError,
- "cannot create an array from unequal-length (ragged) sequences");
- Py_DECREF(*out_dtype);
- return -1;
- }
/* If object arrays are forced */
if (is_object) {
Py_DECREF(*out_dtype);
@@ -2772,61 +2784,30 @@ PyArray_DescrFromObject(PyObject *op, PyArray_Descr *mintype)
/* They all zero-out the memory as previously done */
/* steals reference to descr -- and enforces native byteorder on it.*/
+
/*NUMPY_API
- Like FromDimsAndData but uses the Descr structure instead of typecode
- as input.
+ Deprecated, use PyArray_NewFromDescr instead.
*/
NPY_NO_EXPORT PyObject *
-PyArray_FromDimsAndDataAndDescr(int nd, int *d,
+PyArray_FromDimsAndDataAndDescr(int NPY_UNUSED(nd), int *NPY_UNUSED(d),
PyArray_Descr *descr,
- char *data)
+ char *NPY_UNUSED(data))
{
- PyObject *ret;
- int i;
- npy_intp newd[NPY_MAXDIMS];
- char msg[] = "PyArray_FromDimsAndDataAndDescr: use PyArray_NewFromDescr.";
-
- if (DEPRECATE(msg) < 0) {
- /* 2009-04-30, 1.5 */
- return NULL;
- }
- if (!PyArray_ISNBO(descr->byteorder))
- descr->byteorder = '=';
- for (i = 0; i < nd; i++) {
- newd[i] = (npy_intp) d[i];
- }
- ret = PyArray_NewFromDescr(&PyArray_Type, descr,
- nd, newd,
- NULL, data,
- (data ? NPY_ARRAY_CARRAY : 0), NULL);
- return ret;
+ PyErr_SetString(PyExc_NotImplementedError,
+ "PyArray_FromDimsAndDataAndDescr: use PyArray_NewFromDescr.");
+ Py_DECREF(descr);
+ return NULL;
}
/*NUMPY_API
- Construct an empty array from dimensions and typenum
+ Deprecated, use PyArray_SimpleNew instead.
*/
NPY_NO_EXPORT PyObject *
-PyArray_FromDims(int nd, int *d, int type)
+PyArray_FromDims(int NPY_UNUSED(nd), int *NPY_UNUSED(d), int NPY_UNUSED(type))
{
- PyArrayObject *ret;
- char msg[] = "PyArray_FromDims: use PyArray_SimpleNew.";
-
- if (DEPRECATE(msg) < 0) {
- /* 2009-04-30, 1.5 */
- return NULL;
- }
- ret = (PyArrayObject *)PyArray_FromDimsAndDataAndDescr(nd, d,
- PyArray_DescrFromType(type),
- NULL);
- /*
- * Old FromDims set memory to zero --- some algorithms
- * relied on that. Better keep it the same. If
- * Object type, then it's already been set to zero, though.
- */
- if (ret && (PyArray_DESCR(ret)->type_num != NPY_OBJECT)) {
- memset(PyArray_DATA(ret), 0, PyArray_NBYTES(ret));
- }
- return (PyObject *)ret;
+ PyErr_SetString(PyExc_NotImplementedError,
+ "PyArray_FromDims: use PyArray_SimpleNew.");
+ return NULL;
}
/* end old calls */
@@ -3656,6 +3637,7 @@ array_from_text(PyArray_Descr *dtype, npy_intp num, char *sep, size_t *nread,
npy_intp i;
char *dptr, *clean_sep, *tmp;
int err = 0;
+ int stop_reading_flag; /* -1 indicates end reached; -2 a parsing error */
npy_intp thisbuf = 0;
npy_intp size;
npy_intp bytes, totalbytes;
@@ -3683,9 +3665,9 @@ array_from_text(PyArray_Descr *dtype, npy_intp num, char *sep, size_t *nread,
NPY_BEGIN_ALLOW_THREADS;
totalbytes = bytes = size * dtype->elsize;
dptr = PyArray_DATA(r);
- for (i= 0; num < 0 || i < num; i++) {
- if (next(&stream, dptr, dtype, stream_data) < 0) {
- /* EOF */
+ for (i = 0; num < 0 || i < num; i++) {
+ stop_reading_flag = next(&stream, dptr, dtype, stream_data);
+ if (stop_reading_flag < 0) {
break;
}
*nread += 1;
@@ -3702,7 +3684,12 @@ array_from_text(PyArray_Descr *dtype, npy_intp num, char *sep, size_t *nread,
dptr = tmp + (totalbytes - bytes);
thisbuf = 0;
}
- if (skip_sep(&stream, clean_sep, stream_data) < 0) {
+ stop_reading_flag = skip_sep(&stream, clean_sep, stream_data);
+ if (stop_reading_flag < 0) {
+ if (num == i + 1) {
+ /* if we read as much as requested sep is optional */
+ stop_reading_flag = -1;
+ }
break;
}
}
@@ -3721,6 +3708,21 @@ array_from_text(PyArray_Descr *dtype, npy_intp num, char *sep, size_t *nread,
}
}
NPY_END_ALLOW_THREADS;
+
+ if (stop_reading_flag == -2) {
+ if (PyErr_Occurred()) {
+ /* If an error is already set (unlikely), do not create new one */
+ Py_DECREF(r);
+ return NULL;
+ }
+ /* 2019-09-12, NumPy 1.18 */
+ if (DEPRECATE(
+ "string or file could not be read to its end due to unmatched "
+ "data; this will raise a ValueError in the future.") < 0) {
+ goto fail;
+ }
+ }
+
free(clean_sep);
fail:
@@ -3881,7 +3883,13 @@ PyArray_FromBuffer(PyObject *buf, PyArray_Descr *type,
s = (npy_intp)ts - offset;
n = (npy_intp)count;
itemsize = type->elsize;
- if (n < 0 ) {
+ if (n < 0) {
+ if (itemsize == 0) {
+ PyErr_SetString(PyExc_ValueError,
+ "cannot determine count if itemsize is 0");
+ Py_DECREF(type);
+ return NULL;
+ }
if (s % itemsize != 0) {
PyErr_SetString(PyExc_ValueError,
"buffer size must be a multiple"\
@@ -4000,7 +4008,7 @@ PyArray_FromString(char *data, npy_intp slen, PyArray_Descr *dtype,
size_t nread = 0;
char *end;
- if (dtype->f->scanfunc == NULL) {
+ if (dtype->f->fromstr == NULL) {
PyErr_SetString(PyExc_ValueError,
"don't know how to read " \
"character strings with that " \
@@ -4074,7 +4082,7 @@ PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, npy_intp count)
}
for (i = 0; (i < count || count == -1) &&
(value = PyIter_Next(iter)); i++) {
- if (i >= elcount) {
+ if (i >= elcount && elsize != 0) {
npy_intp nbytes;
/*
Grow PyArray_DATA(ret):
diff --git a/numpy/core/src/multiarray/datetime.c b/numpy/core/src/multiarray/datetime.c
index 768eb1e64..82e046ca1 100644
--- a/numpy/core/src/multiarray/datetime.c
+++ b/numpy/core/src/multiarray/datetime.c
@@ -386,7 +386,8 @@ convert_datetimestruct_to_datetime(PyArray_DatetimeMetaData *meta,
* TO BE REMOVED - NOT USED INTERNALLY.
*/
NPY_NO_EXPORT npy_datetime
-PyArray_DatetimeStructToDatetime(NPY_DATETIMEUNIT fr, npy_datetimestruct *d)
+PyArray_DatetimeStructToDatetime(
+ NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_datetimestruct *NPY_UNUSED(d))
{
PyErr_SetString(PyExc_RuntimeError,
"The NumPy PyArray_DatetimeStructToDatetime function has "
@@ -400,7 +401,8 @@ PyArray_DatetimeStructToDatetime(NPY_DATETIMEUNIT fr, npy_datetimestruct *d)
* TO BE REMOVED - NOT USED INTERNALLY.
*/
NPY_NO_EXPORT npy_datetime
-PyArray_TimedeltaStructToTimedelta(NPY_DATETIMEUNIT fr, npy_timedeltastruct *d)
+PyArray_TimedeltaStructToTimedelta(
+ NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_timedeltastruct *NPY_UNUSED(d))
{
PyErr_SetString(PyExc_RuntimeError,
"The NumPy PyArray_TimedeltaStructToTimedelta function has "
@@ -600,8 +602,9 @@ convert_datetime_to_datetimestruct(PyArray_DatetimeMetaData *meta,
* TO BE REMOVED - NOT USED INTERNALLY.
*/
NPY_NO_EXPORT void
-PyArray_DatetimeToDatetimeStruct(npy_datetime val, NPY_DATETIMEUNIT fr,
- npy_datetimestruct *result)
+PyArray_DatetimeToDatetimeStruct(
+ npy_datetime NPY_UNUSED(val), NPY_DATETIMEUNIT NPY_UNUSED(fr),
+ npy_datetimestruct *result)
{
PyErr_SetString(PyExc_RuntimeError,
"The NumPy PyArray_DatetimeToDatetimeStruct function has "
@@ -621,8 +624,9 @@ PyArray_DatetimeToDatetimeStruct(npy_datetime val, NPY_DATETIMEUNIT fr,
* TO BE REMOVED - NOT USED INTERNALLY.
*/
NPY_NO_EXPORT void
-PyArray_TimedeltaToTimedeltaStruct(npy_timedelta val, NPY_DATETIMEUNIT fr,
- npy_timedeltastruct *result)
+PyArray_TimedeltaToTimedeltaStruct(
+ npy_timedelta NPY_UNUSED(val), NPY_DATETIMEUNIT NPY_UNUSED(fr),
+ npy_timedeltastruct *result)
{
PyErr_SetString(PyExc_RuntimeError,
"The NumPy PyArray_TimedeltaToTimedeltaStruct function has "
@@ -2272,7 +2276,10 @@ convert_pydatetime_to_datetimestruct(PyObject *obj, npy_datetimestruct *out,
if (tmp == NULL) {
return -1;
}
- seconds_offset = PyInt_AsLong(tmp);
+ /* Rounding here is no worse than the integer division below.
+ * Only whole minute offsets are supported by numpy anyway.
+ */
+ seconds_offset = (int)PyFloat_AsDouble(tmp);
if (error_converting(seconds_offset)) {
Py_DECREF(tmp);
return -1;
@@ -3125,7 +3132,7 @@ is_any_numpy_datetime_or_timedelta(PyObject *obj)
*/
NPY_NO_EXPORT int
convert_pyobjects_to_datetimes(int count,
- PyObject **objs, int *type_nums,
+ PyObject **objs, const int *type_nums,
NPY_CASTING casting,
npy_int64 *out_values,
PyArray_DatetimeMetaData *inout_meta)
diff --git a/numpy/core/src/multiarray/datetime_busday.c b/numpy/core/src/multiarray/datetime_busday.c
index c04a6c125..cdeb65d0e 100644
--- a/numpy/core/src/multiarray/datetime_busday.c
+++ b/numpy/core/src/multiarray/datetime_busday.c
@@ -48,7 +48,7 @@ get_day_of_week(npy_datetime date)
*/
static int
is_holiday(npy_datetime date,
- npy_datetime *holidays_begin, npy_datetime *holidays_end)
+ npy_datetime *holidays_begin, const npy_datetime *holidays_end)
{
npy_datetime *trial;
@@ -88,7 +88,7 @@ is_holiday(npy_datetime date,
*/
static npy_datetime *
find_earliest_holiday_on_or_after(npy_datetime date,
- npy_datetime *holidays_begin, npy_datetime *holidays_end)
+ npy_datetime *holidays_begin, const npy_datetime *holidays_end)
{
npy_datetime *trial;
@@ -127,7 +127,7 @@ find_earliest_holiday_on_or_after(npy_datetime date,
*/
static npy_datetime *
find_earliest_holiday_after(npy_datetime date,
- npy_datetime *holidays_begin, npy_datetime *holidays_end)
+ npy_datetime *holidays_begin, const npy_datetime *holidays_end)
{
npy_datetime *trial;
@@ -159,7 +159,7 @@ static int
apply_business_day_roll(npy_datetime date, npy_datetime *out,
int *out_day_of_week,
NPY_BUSDAY_ROLL roll,
- npy_bool *weekmask,
+ const npy_bool *weekmask,
npy_datetime *holidays_begin, npy_datetime *holidays_end)
{
int day_of_week;
@@ -361,7 +361,7 @@ apply_business_day_offset(npy_datetime date, npy_int64 offset,
static int
apply_business_day_count(npy_datetime date_begin, npy_datetime date_end,
npy_int64 *out,
- npy_bool *weekmask, int busdays_in_weekmask,
+ const npy_bool *weekmask, int busdays_in_weekmask,
npy_datetime *holidays_begin, npy_datetime *holidays_end)
{
npy_int64 count, whole_weeks;
@@ -722,7 +722,7 @@ finish:
*/
NPY_NO_EXPORT PyArrayObject *
is_business_day(PyArrayObject *dates, PyArrayObject *out,
- npy_bool *weekmask, int busdays_in_weekmask,
+ const npy_bool *weekmask, int busdays_in_weekmask,
npy_datetime *holidays_begin, npy_datetime *holidays_end)
{
PyArray_DatetimeMetaData temp_meta;
diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c
index ff85c3fcb..734255a9d 100644
--- a/numpy/core/src/multiarray/descriptor.c
+++ b/numpy/core/src/multiarray/descriptor.c
@@ -102,6 +102,7 @@ _arraydescr_from_dtype_attr(PyObject *obj, PyArray_Descr **newdescr)
if (Py_EnterRecursiveCall(
" while trying to convert the given data type from its "
"`.dtype` attribute.") != 0) {
+ Py_DECREF(dtypedescr);
return 1;
}
@@ -148,7 +149,7 @@ array_set_typeDict(PyObject *NPY_UNUSED(ignored), PyObject *args)
arg == '|' || arg == '=')
static int
-_check_for_commastring(char *type, Py_ssize_t len)
+_check_for_commastring(const char *type, Py_ssize_t len)
{
Py_ssize_t i;
int sqbracket;
@@ -497,9 +498,6 @@ _convert_from_array_descr(PyObject *obj, int align)
else {
ret = PyArray_DescrConverter(PyTuple_GET_ITEM(item, 1), &conv);
}
- if (ret == NPY_FAIL) {
- PyObject_Print(PyTuple_GET_ITEM(item, 1), stderr, 0);
- }
}
else if (PyTuple_GET_SIZE(item) == 3) {
newobj = PyTuple_GetSlice(item, 1, 3);
@@ -517,6 +515,7 @@ _convert_from_array_descr(PyObject *obj, int align)
if (ret == NPY_FAIL) {
goto fail;
}
+
if ((PyDict_GetItem(fields, name) != NULL)
|| (title
&& PyBaseString_Check(title)
@@ -3278,7 +3277,7 @@ arraydescr_richcompare(PyArray_Descr *self, PyObject *other, int cmp_op)
}
static int
-descr_nonzero(PyObject *self)
+descr_nonzero(PyObject *NPY_UNUSED(self))
{
/* `bool(np.dtype(...)) == True` for all dtypes. Needed to override default
* nonzero implementation, which checks if `len(object) > 0`. */
diff --git a/numpy/core/src/multiarray/dtype_transfer.c b/numpy/core/src/multiarray/dtype_transfer.c
index a90416a40..ef0dd4a01 100644
--- a/numpy/core/src/multiarray/dtype_transfer.c
+++ b/numpy/core/src/multiarray/dtype_transfer.c
@@ -3337,7 +3337,7 @@ get_decsrcref_transfer_function(int aligned,
/* If there are subarrays, need to wrap it */
else if (PyDataType_HASSUBARRAY(src_dtype)) {
PyArray_Dims src_shape = {NULL, -1};
- npy_intp src_size = 1;
+ npy_intp src_size;
PyArray_StridedUnaryOp *stransfer;
NpyAuxData *data;
diff --git a/numpy/core/src/multiarray/getset.c b/numpy/core/src/multiarray/getset.c
index bed92403f..116e37ce5 100644
--- a/numpy/core/src/multiarray/getset.c
+++ b/numpy/core/src/multiarray/getset.c
@@ -190,7 +190,7 @@ array_strides_set(PyArrayObject *self, PyObject *obj)
static PyObject *
-array_priority_get(PyArrayObject *self)
+array_priority_get(PyArrayObject *NPY_UNUSED(self))
{
return PyFloat_FromDouble(NPY_PRIORITY);
}
diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c
index 762563eb5..a6ac902d3 100644
--- a/numpy/core/src/multiarray/item_selection.c
+++ b/numpy/core/src/multiarray/item_selection.c
@@ -1336,7 +1336,11 @@ PyArray_ArgPartition(PyArrayObject *op, PyArrayObject *ktharray, int axis,
PyArray_ArgSortFunc *argsort;
PyObject *ret;
- if (which < 0 || which >= NPY_NSELECTS) {
+ /*
+ * As a C-exported function, enum NPY_SELECTKIND loses its enum property
+ * Check the values to make sure they are in range
+ */
+ if ((int)which < 0 || (int)which >= NPY_NSELECTS) {
PyErr_SetString(PyExc_ValueError,
"not a valid partition kind");
return NULL;
@@ -1456,8 +1460,8 @@ PyArray_LexSort(PyObject *sort_keys, int axis)
/* Now we can check the axis */
nd = PyArray_NDIM(mps[0]);
- if ((nd == 0) || (PyArray_SIZE(mps[0]) == 1)) {
- /* single element case */
+ if ((nd == 0) || (PyArray_SIZE(mps[0]) <= 1)) {
+ /* empty/single element case */
ret = (PyArrayObject *)PyArray_NewFromDescr(
&PyArray_Type, PyArray_DescrFromType(NPY_INTP),
PyArray_NDIM(mps[0]), PyArray_DIMS(mps[0]), NULL, NULL,
@@ -1466,7 +1470,9 @@ PyArray_LexSort(PyObject *sort_keys, int axis)
if (ret == NULL) {
goto fail;
}
- *((npy_intp *)(PyArray_DATA(ret))) = 0;
+ if (PyArray_SIZE(mps[0]) > 0) {
+ *((npy_intp *)(PyArray_DATA(ret))) = 0;
+ }
goto finish;
}
if (check_and_adjust_axis(&axis, nd) < 0) {
@@ -1516,19 +1522,28 @@ PyArray_LexSort(PyObject *sort_keys, int axis)
char *valbuffer, *indbuffer;
int *swaps;
- if (N == 0 || maxelsize == 0 || sizeof(npy_intp) == 0) {
- goto fail;
+ assert(N > 0); /* Guaranteed and assumed by indbuffer */
+ npy_intp valbufsize = N * maxelsize;
+ if (NPY_UNLIKELY(valbufsize) == 0) {
+ valbufsize = 1; /* Ensure allocation is not empty */
}
- valbuffer = PyDataMem_NEW(N * maxelsize);
+
+ valbuffer = PyDataMem_NEW(valbufsize);
if (valbuffer == NULL) {
goto fail;
}
indbuffer = PyDataMem_NEW(N * sizeof(npy_intp));
if (indbuffer == NULL) {
+ PyDataMem_FREE(valbuffer);
+ goto fail;
+ }
+ swaps = malloc(NPY_LIKELY(n > 0) ? n * sizeof(int) : 1);
+ if (swaps == NULL) {
+ PyDataMem_FREE(valbuffer);
PyDataMem_FREE(indbuffer);
goto fail;
}
- swaps = malloc(n*sizeof(int));
+
for (j = 0; j < n; j++) {
swaps[j] = PyArray_ISBYTESWAPPED(mps[j]);
}
@@ -1557,8 +1572,8 @@ PyArray_LexSort(PyObject *sort_keys, int axis)
#else
if (rcode < 0) {
#endif
- npy_free_cache(valbuffer, N * maxelsize);
- npy_free_cache(indbuffer, N * sizeof(npy_intp));
+ PyDataMem_FREE(valbuffer);
+ PyDataMem_FREE(indbuffer);
free(swaps);
goto fail;
}
@@ -2464,7 +2479,7 @@ finish:
* array of values, which must be of length PyArray_NDIM(self).
*/
NPY_NO_EXPORT PyObject *
-PyArray_MultiIndexGetItem(PyArrayObject *self, npy_intp *multi_index)
+PyArray_MultiIndexGetItem(PyArrayObject *self, const npy_intp *multi_index)
{
int idim, ndim = PyArray_NDIM(self);
char *data = PyArray_DATA(self);
@@ -2492,7 +2507,7 @@ PyArray_MultiIndexGetItem(PyArrayObject *self, npy_intp *multi_index)
* Returns 0 on success, -1 on failure.
*/
NPY_NO_EXPORT int
-PyArray_MultiIndexSetItem(PyArrayObject *self, npy_intp *multi_index,
+PyArray_MultiIndexSetItem(PyArrayObject *self, const npy_intp *multi_index,
PyObject *obj)
{
int idim, ndim = PyArray_NDIM(self);
diff --git a/numpy/core/src/multiarray/item_selection.h b/numpy/core/src/multiarray/item_selection.h
index 90bb5100d..2276b4db7 100644
--- a/numpy/core/src/multiarray/item_selection.h
+++ b/numpy/core/src/multiarray/item_selection.h
@@ -15,7 +15,7 @@ count_boolean_trues(int ndim, char *data, npy_intp *ashape, npy_intp *astrides);
* array of values, which must be of length PyArray_NDIM(self).
*/
NPY_NO_EXPORT PyObject *
-PyArray_MultiIndexGetItem(PyArrayObject *self, npy_intp *multi_index);
+PyArray_MultiIndexGetItem(PyArrayObject *self, const npy_intp *multi_index);
/*
* Sets a single item in the array, based on a single multi-index
@@ -24,7 +24,7 @@ PyArray_MultiIndexGetItem(PyArrayObject *self, npy_intp *multi_index);
* Returns 0 on success, -1 on failure.
*/
NPY_NO_EXPORT int
-PyArray_MultiIndexSetItem(PyArrayObject *self, npy_intp *multi_index,
+PyArray_MultiIndexSetItem(PyArrayObject *self, const npy_intp *multi_index,
PyObject *obj);
#endif
diff --git a/numpy/core/src/multiarray/iterators.c b/numpy/core/src/multiarray/iterators.c
index 83eafaf74..e66bb36aa 100644
--- a/numpy/core/src/multiarray/iterators.c
+++ b/numpy/core/src/multiarray/iterators.c
@@ -98,7 +98,7 @@ parse_index_entry(PyObject *op, npy_intp *step_size,
/* get the dataptr from its current coordinates for simple iterator */
static char*
-get_ptr_simple(PyArrayIterObject* iter, npy_intp *coordinates)
+get_ptr_simple(PyArrayIterObject* iter, const npy_intp *coordinates)
{
npy_intp i;
char *ret;
@@ -116,10 +116,12 @@ get_ptr_simple(PyArrayIterObject* iter, npy_intp *coordinates)
* This is common initialization code between PyArrayIterObject and
* PyArrayNeighborhoodIterObject
*
- * Increase ao refcount
+ * Steals a reference to the array object which gets removed at deallocation,
+ * if the iterator is allocated statically and its dealloc not called, it
+ * can be thought of as borrowing the reference.
*/
-static PyObject *
-array_iter_base_init(PyArrayIterObject *it, PyArrayObject *ao)
+NPY_NO_EXPORT void
+PyArray_RawIterBaseInit(PyArrayIterObject *it, PyArrayObject *ao)
{
int nd, i;
@@ -131,7 +133,6 @@ array_iter_base_init(PyArrayIterObject *it, PyArrayObject *ao)
else {
it->contiguous = 0;
}
- Py_INCREF(ao);
it->ao = ao;
it->size = PyArray_SIZE(ao);
it->nd_m1 = nd - 1;
@@ -155,7 +156,7 @@ array_iter_base_init(PyArrayIterObject *it, PyArrayObject *ao)
it->translate = &get_ptr_simple;
PyArray_ITER_RESET(it);
- return (PyObject *)it;
+ return;
}
static void
@@ -170,6 +171,10 @@ array_iter_base_dealloc(PyArrayIterObject *it)
NPY_NO_EXPORT PyObject *
PyArray_IterNew(PyObject *obj)
{
+ /*
+ * Note that internall PyArray_RawIterBaseInit may be called directly on a
+ * statically allocated PyArrayIterObject.
+ */
PyArrayIterObject *it;
PyArrayObject *ao;
@@ -186,7 +191,8 @@ PyArray_IterNew(PyObject *obj)
return NULL;
}
- array_iter_base_init(it, ao);
+ Py_INCREF(ao); /* PyArray_RawIterBaseInit steals a reference */
+ PyArray_RawIterBaseInit(it, ao);
return (PyObject *)it;
}
@@ -390,6 +396,10 @@ arrayiter_next(PyArrayIterObject *it)
static void
arrayiter_dealloc(PyArrayIterObject *it)
{
+ /*
+ * Note that it is possible to statically allocate a PyArrayIterObject,
+ * which does not call this function.
+ */
array_iter_base_dealloc(it);
PyArray_free(it);
}
@@ -830,7 +840,6 @@ iter_ass_subscript(PyArrayIterObject *self, PyObject *ind, PyObject *val)
if (check_and_adjust_index(&start, self->size, -1, NULL) < 0) {
goto finish;
}
- retval = 0;
PyArray_ITER_GOTO1D(self, start);
retval = type->f->setitem(val, self->dataptr, self->ao);
PyArray_ITER_RESET(self);
@@ -1656,7 +1665,7 @@ static char* _set_constant(PyArrayNeighborhoodIterObject* iter,
/* set the dataptr from its current coordinates */
static char*
-get_ptr_constant(PyArrayIterObject* _iter, npy_intp *coordinates)
+get_ptr_constant(PyArrayIterObject* _iter, const npy_intp *coordinates)
{
int i;
npy_intp bd, _coordinates[NPY_MAXDIMS];
@@ -1711,7 +1720,7 @@ __npy_pos_remainder(npy_intp i, npy_intp n)
/* set the dataptr from its current coordinates */
static char*
-get_ptr_mirror(PyArrayIterObject* _iter, npy_intp *coordinates)
+get_ptr_mirror(PyArrayIterObject* _iter, const npy_intp *coordinates)
{
int i;
npy_intp bd, _coordinates[NPY_MAXDIMS], lb;
@@ -1745,7 +1754,7 @@ __npy_euclidean_division(npy_intp i, npy_intp n)
_coordinates[c] = lb + __npy_euclidean_division(bd, p->limits_sizes[c]);
static char*
-get_ptr_circular(PyArrayIterObject* _iter, npy_intp *coordinates)
+get_ptr_circular(PyArrayIterObject* _iter, const npy_intp *coordinates)
{
int i;
npy_intp bd, _coordinates[NPY_MAXDIMS], lb;
@@ -1767,7 +1776,7 @@ get_ptr_circular(PyArrayIterObject* _iter, npy_intp *coordinates)
* A Neighborhood Iterator object.
*/
NPY_NO_EXPORT PyObject*
-PyArray_NeighborhoodIterNew(PyArrayIterObject *x, npy_intp *bounds,
+PyArray_NeighborhoodIterNew(PyArrayIterObject *x, const npy_intp *bounds,
int mode, PyArrayObject* fill)
{
int i;
@@ -1779,7 +1788,8 @@ PyArray_NeighborhoodIterNew(PyArrayIterObject *x, npy_intp *bounds,
}
PyObject_Init((PyObject *)ret, &PyArrayNeighborhoodIter_Type);
- array_iter_base_init((PyArrayIterObject*)ret, x->ao);
+ Py_INCREF(x->ao); /* PyArray_RawIterBaseInit steals a reference */
+ PyArray_RawIterBaseInit((PyArrayIterObject*)ret, x->ao);
Py_INCREF(x);
ret->_internal_iter = x;
diff --git a/numpy/core/src/multiarray/iterators.h b/numpy/core/src/multiarray/iterators.h
index 376dc154a..d942f45b8 100644
--- a/numpy/core/src/multiarray/iterators.h
+++ b/numpy/core/src/multiarray/iterators.h
@@ -7,4 +7,7 @@ NPY_NO_EXPORT PyObject
NPY_NO_EXPORT int
iter_ass_subscript(PyArrayIterObject *, PyObject *, PyObject *);
+NPY_NO_EXPORT void
+PyArray_RawIterBaseInit(PyArrayIterObject *it, PyArrayObject *ao);
+
#endif
diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c
index add1143b2..247864775 100644
--- a/numpy/core/src/multiarray/mapping.c
+++ b/numpy/core/src/multiarray/mapping.c
@@ -176,7 +176,7 @@ unpack_tuple(PyTupleObject *index, PyObject **result, npy_intp result_n)
/* Unpack a single scalar index, taking a new reference to match unpack_tuple */
static NPY_INLINE npy_intp
-unpack_scalar(PyObject *index, PyObject **result, npy_intp result_n)
+unpack_scalar(PyObject *index, PyObject **result, npy_intp NPY_UNUSED(result_n))
{
Py_INCREF(index);
result[0] = index;
@@ -1699,7 +1699,7 @@ array_subscript(PyArrayObject *self, PyObject *op)
PyArray_SHAPE(tmp_arr),
PyArray_STRIDES(tmp_arr),
PyArray_BYTES(tmp_arr),
- PyArray_FLAGS(self),
+ PyArray_FLAGS(tmp_arr),
(PyObject *)self, (PyObject *)tmp_arr);
Py_DECREF(tmp_arr);
if (result == NULL) {
diff --git a/numpy/core/src/multiarray/methods.c b/numpy/core/src/multiarray/methods.c
index 79c60aa2e..e5845f2f6 100644
--- a/numpy/core/src/multiarray/methods.c
+++ b/numpy/core/src/multiarray/methods.c
@@ -1051,7 +1051,7 @@ any_array_ufunc_overrides(PyObject *args, PyObject *kwds)
NPY_NO_EXPORT PyObject *
-array_ufunc(PyArrayObject *self, PyObject *args, PyObject *kwds)
+array_ufunc(PyArrayObject *NPY_UNUSED(self), PyObject *args, PyObject *kwds)
{
PyObject *ufunc, *method_name, *normal_args, *ufunc_method;
PyObject *result = NULL;
@@ -1100,7 +1100,7 @@ cleanup:
}
static PyObject *
-array_function(PyArrayObject *self, PyObject *c_args, PyObject *c_kwds)
+array_function(PyArrayObject *NPY_UNUSED(self), PyObject *c_args, PyObject *c_kwds)
{
PyObject *func, *types, *args, *kwargs, *result;
static char *kwlist[] = {"func", "types", "args", "kwargs", NULL};
@@ -1179,7 +1179,7 @@ array_resize(PyArrayObject *self, PyObject *args, PyObject *kwds)
return NULL;
}
- ret = PyArray_Resize(self, &newshape, refcheck, NPY_CORDER);
+ ret = PyArray_Resize(self, &newshape, refcheck, NPY_ANYORDER);
npy_free_cache_dim_obj(newshape);
if (ret == NULL) {
return NULL;
@@ -1732,7 +1732,7 @@ array_reduce(PyArrayObject *self, PyObject *NPY_UNUSED(args))
}
static PyObject *
-array_reduce_ex_regular(PyArrayObject *self, int protocol)
+array_reduce_ex_regular(PyArrayObject *self, int NPY_UNUSED(protocol))
{
PyObject *subclass_array_reduce = NULL;
PyObject *ret;
@@ -1861,7 +1861,7 @@ array_reduce_ex(PyArrayObject *self, PyObject *args)
PyDataType_FLAGCHK(descr, NPY_ITEM_HASOBJECT) ||
(PyType_IsSubtype(((PyObject*)self)->ob_type, &PyArray_Type) &&
((PyObject*)self)->ob_type != &PyArray_Type) ||
- PyDataType_ISUNSIZED(descr)) {
+ descr->elsize == 0) {
/* The PickleBuffer class from version 5 of the pickle protocol
* can only be used for arrays backed by a contiguous data buffer.
* For all other cases we fallback to the generic array_reduce
diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c
index bef978c94..441567049 100644
--- a/numpy/core/src/multiarray/multiarraymodule.c
+++ b/numpy/core/src/multiarray/multiarraymodule.c
@@ -286,7 +286,8 @@ PyArray_AsCArray(PyObject **op, void *ptr, npy_intp *dims, int nd,
* Convert to a 1D C-array
*/
NPY_NO_EXPORT int
-PyArray_As1D(PyObject **op, char **ptr, int *d1, int typecode)
+PyArray_As1D(PyObject **NPY_UNUSED(op), char **NPY_UNUSED(ptr),
+ int *NPY_UNUSED(d1), int NPY_UNUSED(typecode))
{
/* 2008-07-14, 1.5 */
PyErr_SetString(PyExc_NotImplementedError,
@@ -298,7 +299,8 @@ PyArray_As1D(PyObject **op, char **ptr, int *d1, int typecode)
* Convert to a 2D C-array
*/
NPY_NO_EXPORT int
-PyArray_As2D(PyObject **op, char ***ptr, int *d1, int *d2, int typecode)
+PyArray_As2D(PyObject **NPY_UNUSED(op), char ***NPY_UNUSED(ptr),
+ int *NPY_UNUSED(d1), int *NPY_UNUSED(d2), int NPY_UNUSED(typecode))
{
/* 2008-07-14, 1.5 */
PyErr_SetString(PyExc_NotImplementedError,
@@ -1560,7 +1562,8 @@ _array_fromobject(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kws)
PyArrayObject *oparr = NULL, *ret = NULL;
npy_bool subok = NPY_FALSE;
npy_bool copy = NPY_TRUE;
- int ndmin = 0, nd;
+ int nd;
+ npy_intp ndmin = 0;
PyArray_Descr *type = NULL;
PyArray_Descr *oldtype = NULL;
NPY_ORDER order = NPY_KEEPORDER;
@@ -1631,12 +1634,10 @@ _array_fromobject(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kws)
}
}
- /* copy=False with default dtype, order and ndim */
- if (STRIDING_OK(oparr, order)) {
- ret = oparr;
- Py_INCREF(ret);
- goto finish;
- }
+ /* copy=False with default dtype, order (any is OK) and ndim */
+ ret = oparr;
+ Py_INCREF(ret);
+ goto finish;
}
}
@@ -3781,7 +3782,7 @@ _vec_string_no_args(PyArrayObject* char_array,
}
static PyObject *
-_vec_string(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds)
+_vec_string(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *NPY_UNUSED(kwds))
{
PyArrayObject* char_array = NULL;
PyArray_Descr *type;
diff --git a/numpy/core/src/multiarray/nditer_api.c b/numpy/core/src/multiarray/nditer_api.c
index 18ca127e1..db0bfcece 100644
--- a/numpy/core/src/multiarray/nditer_api.c
+++ b/numpy/core/src/multiarray/nditer_api.c
@@ -1628,15 +1628,12 @@ npyiter_coalesce_axes(NpyIter *iter)
npy_intp istrides, nstrides = NAD_NSTRIDES();
NpyIter_AxisData *axisdata = NIT_AXISDATA(iter);
npy_intp sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop);
- NpyIter_AxisData *ad_compress;
+ NpyIter_AxisData *ad_compress = axisdata;
npy_intp new_ndim = 1;
/* The HASMULTIINDEX or IDENTPERM flags do not apply after coalescing */
NIT_ITFLAGS(iter) &= ~(NPY_ITFLAG_IDENTPERM|NPY_ITFLAG_HASMULTIINDEX);
- axisdata = NIT_AXISDATA(iter);
- ad_compress = axisdata;
-
for (idim = 0; idim < ndim-1; ++idim) {
int can_coalesce = 1;
npy_intp shape0 = NAD_SHAPE(ad_compress);
diff --git a/numpy/core/src/multiarray/nditer_constr.c b/numpy/core/src/multiarray/nditer_constr.c
index 3b3635afe..d40836dc2 100644
--- a/numpy/core/src/multiarray/nditer_constr.c
+++ b/numpy/core/src/multiarray/nditer_constr.c
@@ -24,7 +24,7 @@ static int
npyiter_check_global_flags(npy_uint32 flags, npy_uint32* itflags);
static int
npyiter_check_op_axes(int nop, int oa_ndim, int **op_axes,
- npy_intp *itershape);
+ const npy_intp *itershape);
static int
npyiter_calculate_ndim(int nop, PyArrayObject **op_in,
int oa_ndim);
@@ -55,7 +55,7 @@ npyiter_check_casting(int nop, PyArrayObject **op,
static int
npyiter_fill_axisdata(NpyIter *iter, npy_uint32 flags, npyiter_opitflags *op_itflags,
char **op_dataptr,
- npy_uint32 *op_flags, int **op_axes,
+ const npy_uint32 *op_flags, int **op_axes,
npy_intp *itershape);
static void
npyiter_replace_axisdata(NpyIter *iter, int iop,
@@ -74,23 +74,23 @@ static void
npyiter_find_best_axis_ordering(NpyIter *iter);
static PyArray_Descr *
npyiter_get_common_dtype(int nop, PyArrayObject **op,
- npyiter_opitflags *op_itflags, PyArray_Descr **op_dtype,
+ const npyiter_opitflags *op_itflags, PyArray_Descr **op_dtype,
PyArray_Descr **op_request_dtypes,
int only_inputs);
static PyArrayObject *
npyiter_new_temp_array(NpyIter *iter, PyTypeObject *subtype,
npy_uint32 flags, npyiter_opitflags *op_itflags,
int op_ndim, npy_intp *shape,
- PyArray_Descr *op_dtype, int *op_axes);
+ PyArray_Descr *op_dtype, const int *op_axes);
static int
npyiter_allocate_arrays(NpyIter *iter,
npy_uint32 flags,
PyArray_Descr **op_dtype, PyTypeObject *subtype,
- npy_uint32 *op_flags, npyiter_opitflags *op_itflags,
+ const npy_uint32 *op_flags, npyiter_opitflags *op_itflags,
int **op_axes);
static void
npyiter_get_priority_subtype(int nop, PyArrayObject **op,
- npyiter_opitflags *op_itflags,
+ const npyiter_opitflags *op_itflags,
double *subtype_priority, PyTypeObject **subtype);
static int
npyiter_allocate_transfer_functions(NpyIter *iter);
@@ -787,7 +787,7 @@ npyiter_check_global_flags(npy_uint32 flags, npy_uint32* itflags)
static int
npyiter_check_op_axes(int nop, int oa_ndim, int **op_axes,
- npy_intp *itershape)
+ const npy_intp *itershape)
{
char axes_dupcheck[NPY_MAXDIMS];
int iop, idim;
@@ -1423,7 +1423,7 @@ check_mask_for_writemasked_reduction(NpyIter *iter, int iop)
static int
npyiter_fill_axisdata(NpyIter *iter, npy_uint32 flags, npyiter_opitflags *op_itflags,
char **op_dataptr,
- npy_uint32 *op_flags, int **op_axes,
+ const npy_uint32 *op_flags, int **op_axes,
npy_intp *itershape)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
@@ -2409,7 +2409,7 @@ npyiter_find_best_axis_ordering(NpyIter *iter)
*/
static PyArray_Descr *
npyiter_get_common_dtype(int nop, PyArrayObject **op,
- npyiter_opitflags *op_itflags, PyArray_Descr **op_dtype,
+ const npyiter_opitflags *op_itflags, PyArray_Descr **op_dtype,
PyArray_Descr **op_request_dtypes,
int only_inputs)
{
@@ -2477,7 +2477,7 @@ static PyArrayObject *
npyiter_new_temp_array(NpyIter *iter, PyTypeObject *subtype,
npy_uint32 flags, npyiter_opitflags *op_itflags,
int op_ndim, npy_intp *shape,
- PyArray_Descr *op_dtype, int *op_axes)
+ PyArray_Descr *op_dtype, const int *op_axes)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
int idim, ndim = NIT_NDIM(iter);
@@ -2706,7 +2706,7 @@ static int
npyiter_allocate_arrays(NpyIter *iter,
npy_uint32 flags,
PyArray_Descr **op_dtype, PyTypeObject *subtype,
- npy_uint32 *op_flags, npyiter_opitflags *op_itflags,
+ const npy_uint32 *op_flags, npyiter_opitflags *op_itflags,
int **op_axes)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
@@ -3109,7 +3109,7 @@ npyiter_allocate_arrays(NpyIter *iter,
*/
static void
npyiter_get_priority_subtype(int nop, PyArrayObject **op,
- npyiter_opitflags *op_itflags,
+ const npyiter_opitflags *op_itflags,
double *subtype_priority,
PyTypeObject **subtype)
{
diff --git a/numpy/core/src/multiarray/nditer_pywrap.c b/numpy/core/src/multiarray/nditer_pywrap.c
index ffea08bb3..4b9d41aa4 100644
--- a/numpy/core/src/multiarray/nditer_pywrap.c
+++ b/numpy/core/src/multiarray/nditer_pywrap.c
@@ -82,7 +82,8 @@ static int npyiter_cache_values(NewNpyArrayIterObject *self)
}
static PyObject *
-npyiter_new(PyTypeObject *subtype, PyObject *args, PyObject *kwds)
+npyiter_new(PyTypeObject *subtype, PyObject *NPY_UNUSED(args),
+ PyObject *NPY_UNUSED(kwds))
{
NewNpyArrayIterObject *self;
@@ -535,7 +536,7 @@ try_single_dtype:
}
static int
-npyiter_convert_op_axes(PyObject *op_axes_in, npy_intp nop,
+npyiter_convert_op_axes(PyObject *op_axes_in, int nop,
int **op_axes, int *oa_ndim)
{
PyObject *a;
@@ -2365,7 +2366,7 @@ npyiter_close(NewNpyArrayIterObject *self)
}
static PyObject *
-npyiter_exit(NewNpyArrayIterObject *self, PyObject *args)
+npyiter_exit(NewNpyArrayIterObject *self, PyObject *NPY_UNUSED(args))
{
/* even if called via exception handling, writeback any data */
return npyiter_close(self);
diff --git a/numpy/core/src/multiarray/number.c b/numpy/core/src/multiarray/number.c
index 0ceb994ef..dabc866ff 100644
--- a/numpy/core/src/multiarray/number.c
+++ b/numpy/core/src/multiarray/number.c
@@ -391,7 +391,8 @@ array_matrix_multiply(PyArrayObject *m1, PyObject *m2)
}
static PyObject *
-array_inplace_matrix_multiply(PyArrayObject *m1, PyObject *m2)
+array_inplace_matrix_multiply(
+ PyArrayObject *NPY_UNUSED(m1), PyObject *NPY_UNUSED(m2))
{
PyErr_SetString(PyExc_TypeError,
"In-place matrix multiplication is not (yet) supported. "
diff --git a/numpy/core/src/multiarray/refcount.c b/numpy/core/src/multiarray/refcount.c
index b8230c81a..6033929d9 100644
--- a/numpy/core/src/multiarray/refcount.c
+++ b/numpy/core/src/multiarray/refcount.c
@@ -11,6 +11,7 @@
#define _MULTIARRAYMODULE
#include "numpy/arrayobject.h"
#include "numpy/arrayscalars.h"
+#include "iterators.h"
#include "npy_config.h"
@@ -210,21 +211,22 @@ PyArray_XDECREF(PyArrayObject *mp)
npy_intp i, n;
PyObject **data;
PyObject *temp;
- PyArrayIterObject *it;
+    /*
+     * Allocating the iterator statically means this function does not
+     * modify the reference count of the array, so it is safe during dealloc.
+     * (Static allocation is not strictly required as such.)
+     */
+ PyArrayIterObject it;
if (!PyDataType_REFCHK(PyArray_DESCR(mp))) {
return 0;
}
if (PyArray_DESCR(mp)->type_num != NPY_OBJECT) {
- it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)mp);
- if (it == NULL) {
- return -1;
+ PyArray_RawIterBaseInit(&it, mp);
+ while(it.index < it.size) {
+ PyArray_Item_XDECREF(it.dataptr, PyArray_DESCR(mp));
+ PyArray_ITER_NEXT(&it);
}
- while(it->index < it->size) {
- PyArray_Item_XDECREF(it->dataptr, PyArray_DESCR(mp));
- PyArray_ITER_NEXT(it);
- }
- Py_DECREF(it);
return 0;
}
@@ -242,16 +244,12 @@ PyArray_XDECREF(PyArrayObject *mp)
}
}
else { /* handles misaligned data too */
- it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)mp);
- if (it == NULL) {
- return -1;
- }
- while(it->index < it->size) {
- NPY_COPY_PYOBJECT_PTR(&temp, it->dataptr);
+ PyArray_RawIterBaseInit(&it, mp);
+ while(it.index < it.size) {
+ NPY_COPY_PYOBJECT_PTR(&temp, it.dataptr);
Py_XDECREF(temp);
- PyArray_ITER_NEXT(it);
+ PyArray_ITER_NEXT(&it);
}
- Py_DECREF(it);
}
return 0;
}
diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src
index 34839b866..9adca6773 100644
--- a/numpy/core/src/multiarray/scalartypes.c.src
+++ b/numpy/core/src/multiarray/scalartypes.c.src
@@ -4492,6 +4492,36 @@ initialize_numeric_types(void)
PyArrayIter_Type.tp_iter = PyObject_SelfIter;
PyArrayMapIter_Type.tp_iter = PyObject_SelfIter;
+
+ /*
+ * Give types different names when they are the same size (gh-9799).
+ * `np.intX` always refers to the first int of that size in the sequence
+ * `['LONG', 'LONGLONG', 'INT', 'SHORT', 'BYTE']`.
+ */
+#if (NPY_SIZEOF_BYTE == NPY_SIZEOF_SHORT)
+ PyByteArrType_Type.tp_name = "numpy.byte";
+ PyUByteArrType_Type.tp_name = "numpy.ubyte";
+#endif
+#if (NPY_SIZEOF_SHORT == NPY_SIZEOF_INT)
+ PyShortArrType_Type.tp_name = "numpy.short";
+ PyUShortArrType_Type.tp_name = "numpy.ushort";
+#endif
+#if (NPY_SIZEOF_INT == NPY_SIZEOF_LONG)
+ PyIntArrType_Type.tp_name = "numpy.intc";
+ PyUIntArrType_Type.tp_name = "numpy.uintc";
+#endif
+#if (NPY_SIZEOF_LONGLONG == NPY_SIZEOF_LONG)
+ PyLongLongArrType_Type.tp_name = "numpy.longlong";
+ PyULongLongArrType_Type.tp_name = "numpy.ulonglong";
+#endif
+
+ /*
+ Do the same for longdouble
+ */
+#if (NPY_SIZEOF_LONGDOUBLE == NPY_SIZEOF_DOUBLE)
+ PyLongDoubleArrType_Type.tp_name = "numpy.longdouble";
+ PyCLongDoubleArrType_Type.tp_name = "numpy.clongdouble";
+#endif
}
typedef struct {
diff --git a/numpy/core/src/multiarray/shape.c b/numpy/core/src/multiarray/shape.c
index 30820737e..4e31f003b 100644
--- a/numpy/core/src/multiarray/shape.c
+++ b/numpy/core/src/multiarray/shape.c
@@ -26,7 +26,7 @@ static int
_fix_unknown_dimension(PyArray_Dims *newshape, PyArrayObject *arr);
static int
-_attempt_nocopy_reshape(PyArrayObject *self, int newnd, npy_intp* newdims,
+_attempt_nocopy_reshape(PyArrayObject *self, int newnd, const npy_intp *newdims,
npy_intp *newstrides, int is_f_order);
static void
@@ -40,11 +40,11 @@ _putzero(char *optr, PyObject *zero, PyArray_Descr *dtype);
*/
NPY_NO_EXPORT PyObject *
PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck,
- NPY_ORDER order)
+ NPY_ORDER NPY_UNUSED(order))
{
npy_intp oldnbytes, newnbytes;
npy_intp oldsize, newsize;
- int new_nd=newshape->len, k, n, elsize;
+ int new_nd=newshape->len, k, elsize;
int refcnt;
npy_intp* new_dimensions=newshape->ptr;
npy_intp new_strides[NPY_MAXDIMS];
@@ -136,8 +136,8 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck,
PyObject *zero = PyInt_FromLong(0);
char *optr;
optr = PyArray_BYTES(self) + oldnbytes;
- n = newsize - oldsize;
- for (k = 0; k < n; k++) {
+ npy_intp n_new = newsize - oldsize;
+ for (npy_intp i = 0; i < n_new; i++) {
_putzero((char *)optr, zero, PyArray_DESCR(self));
optr += elsize;
}
@@ -361,7 +361,7 @@ _putzero(char *optr, PyObject *zero, PyArray_Descr *dtype)
* stride of the next-fastest index.
*/
static int
-_attempt_nocopy_reshape(PyArrayObject *self, int newnd, npy_intp* newdims,
+_attempt_nocopy_reshape(PyArrayObject *self, int newnd, const npy_intp *newdims,
npy_intp *newstrides, int is_f_order)
{
int oldnd;
@@ -766,7 +766,7 @@ static int _npy_stride_sort_item_comparator(const void *a, const void *b)
* [(2, 12), (0, 4), (1, -2)].
*/
NPY_NO_EXPORT void
-PyArray_CreateSortedStridePerm(int ndim, npy_intp *strides,
+PyArray_CreateSortedStridePerm(int ndim, npy_intp const *strides,
npy_stride_sort_item *out_strideperm)
{
int i;
@@ -1048,7 +1048,7 @@ build_shape_string(npy_intp n, npy_intp *vals)
* from a reduction result once its computation is complete.
*/
NPY_NO_EXPORT void
-PyArray_RemoveAxesInPlace(PyArrayObject *arr, npy_bool *flags)
+PyArray_RemoveAxesInPlace(PyArrayObject *arr, const npy_bool *flags)
{
PyArrayObject_fields *fa = (PyArrayObject_fields *)arr;
npy_intp *shape = fa->dimensions, *strides = fa->strides;
diff --git a/numpy/core/src/npymath/npy_math_complex.c.src b/numpy/core/src/npymath/npy_math_complex.c.src
index dad381232..8c432e483 100644
--- a/numpy/core/src/npymath/npy_math_complex.c.src
+++ b/numpy/core/src/npymath/npy_math_complex.c.src
@@ -40,13 +40,14 @@
* flag in an efficient way. The flag is IEEE specific. See
* https://github.com/freebsd/freebsd/blob/4c6378299/lib/msun/src/catrig.c#L42
*/
+#if !defined(HAVE_CACOSF) || !defined(HAVE_CACOSL) || !defined(HAVE_CASINHF) || !defined(HAVE_CASINHL)
#define raise_inexact() do { \
volatile npy_float NPY_UNUSED(junk) = 1 + tiny; \
} while (0)
static const volatile npy_float tiny = 3.9443045e-31f;
-
+#endif
/**begin repeat
* #type = npy_float, npy_double, npy_longdouble#
@@ -64,9 +65,6 @@ static const volatile npy_float tiny = 3.9443045e-31f;
* Constants
*=========================================================*/
static const @ctype@ c_1@c@ = {1.0@C@, 0.0};
-static const @ctype@ c_half@c@ = {0.5@C@, 0.0};
-static const @ctype@ c_i@c@ = {0.0, 1.0@C@};
-static const @ctype@ c_ihalf@c@ = {0.0, 0.5@C@};
/*==========================================================
* Helper functions
@@ -76,22 +74,6 @@ static const @ctype@ c_ihalf@c@ = {0.0, 0.5@C@};
*=========================================================*/
static NPY_INLINE
@ctype@
-cadd@c@(@ctype@ a, @ctype@ b)
-{
- return npy_cpack@c@(npy_creal@c@(a) + npy_creal@c@(b),
- npy_cimag@c@(a) + npy_cimag@c@(b));
-}
-
-static NPY_INLINE
-@ctype@
-csub@c@(@ctype@ a, @ctype@ b)
-{
- return npy_cpack@c@(npy_creal@c@(a) - npy_creal@c@(b),
- npy_cimag@c@(a) - npy_cimag@c@(b));
-}
-
-static NPY_INLINE
-@ctype@
cmul@c@(@ctype@ a, @ctype@ b)
{
@type@ ar, ai, br, bi;
@@ -132,20 +114,6 @@ cdiv@c@(@ctype@ a, @ctype@ b)
}
}
-static NPY_INLINE
-@ctype@
-cneg@c@(@ctype@ a)
-{
- return npy_cpack@c@(-npy_creal@c@(a), -npy_cimag@c@(a));
-}
-
-static NPY_INLINE
-@ctype@
-cmuli@c@(@ctype@ a)
-{
- return npy_cpack@c@(-npy_cimag@c@(a), npy_creal@c@(a));
-}
-
/*==========================================================
* Custom implementation of missing complex C99 functions
*=========================================================*/
diff --git a/numpy/core/src/npymath/npy_math_internal.h.src b/numpy/core/src/npymath/npy_math_internal.h.src
index fa820baac..18b6d1434 100644
--- a/numpy/core/src/npymath/npy_math_internal.h.src
+++ b/numpy/core/src/npymath/npy_math_internal.h.src
@@ -716,3 +716,44 @@ npy_@func@@c@(@type@ a, @type@ b)
return npy_@func@u@c@(a < 0 ? -a : a, b < 0 ? -b : b);
}
/**end repeat**/
+
+/* Unlike LCM and GCD, we need byte and short variants for the shift operators,
+ * since the result is dependent on the width of the type
+ */
+/**begin repeat
+ *
+ * #type = byte, short, int, long, longlong#
+ * #c = hh,h,,l,ll#
+ */
+/**begin repeat1
+ *
+ * #u = u,#
+ * #is_signed = 0,1#
+ */
+NPY_INPLACE npy_@u@@type@
+npy_lshift@u@@c@(npy_@u@@type@ a, npy_@u@@type@ b)
+{
+ if (NPY_LIKELY((size_t)b < sizeof(a) * CHAR_BIT)) {
+ return a << b;
+ }
+ else {
+ return 0;
+ }
+}
+NPY_INPLACE npy_@u@@type@
+npy_rshift@u@@c@(npy_@u@@type@ a, npy_@u@@type@ b)
+{
+ if (NPY_LIKELY((size_t)b < sizeof(a) * CHAR_BIT)) {
+ return a >> b;
+ }
+#if @is_signed@
+ else if (a < 0) {
+ return (npy_@u@@type@)-1; /* preserve the sign bit */
+ }
+#endif
+ else {
+ return 0;
+ }
+}
+/**end repeat1**/
+/**end repeat**/
diff --git a/numpy/core/src/npysort/radixsort.c.src b/numpy/core/src/npysort/radixsort.c.src
index c90b06974..72887d7e4 100644
--- a/numpy/core/src/npysort/radixsort.c.src
+++ b/numpy/core/src/npysort/radixsort.c.src
@@ -198,9 +198,9 @@ aradixsort_@suff@(void *start, npy_intp* tosort, npy_intp num, void *NPY_UNUSED(
return 0;
}
- k1 = KEY_OF(arr[0]);
+ k1 = KEY_OF(arr[tosort[0]]);
for (npy_intp i = 1; i < num; i++) {
- k2 = KEY_OF(arr[i]);
+ k2 = KEY_OF(arr[tosort[i]]);
if (k1 > k2) {
all_sorted = 0;
break;
diff --git a/numpy/core/src/umath/_rational_tests.c.src b/numpy/core/src/umath/_rational_tests.c.src
index 9e74845df..615e395c7 100644
--- a/numpy/core/src/umath/_rational_tests.c.src
+++ b/numpy/core/src/umath/_rational_tests.c.src
@@ -539,11 +539,11 @@ static PyObject*
pyrational_str(PyObject* self) {
rational x = ((PyRational*)self)->r;
if (d(x)!=1) {
- return PyString_FromFormat(
+ return PyUString_FromFormat(
"%ld/%ld",(long)x.n,(long)d(x));
}
else {
- return PyString_FromFormat(
+ return PyUString_FromFormat(
"%ld",(long)x.n);
}
}
diff --git a/numpy/core/src/umath/cpuid.c b/numpy/core/src/umath/cpuid.c
index 8673f1736..72c6493e8 100644
--- a/numpy/core/src/umath/cpuid.c
+++ b/numpy/core/src/umath/cpuid.c
@@ -48,6 +48,25 @@ int os_avx512_support(void)
#endif
}
+static NPY_INLINE
+int cpu_supports_fma(void)
+{
+#ifdef __x86_64__
+ unsigned int feature = 0x01;
+ unsigned int a, b, c, d;
+ __asm__ volatile (
+ "cpuid" "\n\t"
+ : "=a" (a), "=b" (b), "=c" (c), "=d" (d)
+ : "a" (feature));
+ /*
+ * FMA is the 12th bit of ECX
+ */
+ return (c >> 12) & 1;
+#else
+ return 0;
+#endif
+}
+
/*
* Primitive cpu feature detect function
* Currently only supports checking for avx on gcc compatible compilers.
@@ -63,6 +82,9 @@ npy_cpu_supports(const char * feature)
return 0;
#endif
}
+ else if (strcmp(feature, "fma") == 0) {
+ return cpu_supports_fma() && __builtin_cpu_supports("avx2") && os_avx_support();
+ }
else if (strcmp(feature, "avx2") == 0) {
return __builtin_cpu_supports("avx2") && os_avx_support();
}
diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src
index 1a4885133..5443223ab 100644
--- a/numpy/core/src/umath/loops.c.src
+++ b/numpy/core/src/umath/loops.c.src
@@ -699,6 +699,7 @@ BOOL_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED
* #ftype = npy_float, npy_float, npy_float, npy_float, npy_double, npy_double,
* npy_double, npy_double, npy_double, npy_double#
* #SIGNED = 1, 0, 1, 0, 1, 0, 1, 0, 1, 0#
+ * #c = hh,uhh,h,uh,,u,l,ul,ll,ull#
*/
#define @TYPE@_floor_divide @TYPE@_divide
@@ -776,16 +777,15 @@ NPY_NO_EXPORT NPY_GCC_OPT_3 @ATTR@ void
/**begin repeat2
* Arithmetic
- * #kind = add, subtract, multiply, bitwise_and, bitwise_or, bitwise_xor,
- * left_shift, right_shift#
- * #OP = +, -,*, &, |, ^, <<, >>#
+ * #kind = add, subtract, multiply, bitwise_and, bitwise_or, bitwise_xor#
+ * #OP = +, -, *, &, |, ^#
*/
#if @CHK@
NPY_NO_EXPORT NPY_GCC_OPT_3 @ATTR@ void
@TYPE@_@kind@@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
{
- if(IS_BINARY_REDUCE) {
+ if (IS_BINARY_REDUCE) {
BINARY_REDUCE_LOOP(@type@) {
io1 @OP@= *(@type@ *)ip2;
}
@@ -799,6 +799,47 @@ NPY_NO_EXPORT NPY_GCC_OPT_3 @ATTR@ void
/**end repeat2**/
+/*
+ * Arithmetic bit shift operations.
+ *
+ * Intel hardware masks bit shift values, so large shifts wrap around
+ * and can produce surprising results. The special handling ensures that
+ * behavior is independent of compiler or hardware.
+ * TODO: We could implement consistent behavior for negative shifts,
+ * which is undefined in C.
+ */
+
+#define INT_left_shift_needs_clear_floatstatus
+#define UINT_left_shift_needs_clear_floatstatus
+
+NPY_NO_EXPORT NPY_GCC_OPT_3 void
+@TYPE@_left_shift@isa@(char **args, npy_intp *dimensions, npy_intp *steps,
+ void *NPY_UNUSED(func))
+{
+ BINARY_LOOP_FAST(@type@, @type@, *out = npy_lshift@c@(in1, in2));
+
+#ifdef @TYPE@_left_shift_needs_clear_floatstatus
+ // For some reason, our macOS CI sets an "invalid" flag here, but only
+ // for some types.
+ npy_clear_floatstatus_barrier((char*)dimensions);
+#endif
+}
+
+#undef INT_left_shift_needs_clear_floatstatus
+#undef UINT_left_shift_needs_clear_floatstatus
+
+NPY_NO_EXPORT
+#ifndef NPY_DO_NOT_OPTIMIZE_@TYPE@_right_shift
+NPY_GCC_OPT_3
+#endif
+void
+@TYPE@_right_shift@isa@(char **args, npy_intp *dimensions, npy_intp *steps,
+ void *NPY_UNUSED(func))
+{
+ BINARY_LOOP_FAST(@type@, @type@, *out = npy_rshift@c@(in1, in2));
+}
+
+
/**begin repeat2
* #kind = equal, not_equal, greater, greater_equal, less, less_equal,
* logical_and, logical_or#
@@ -1594,8 +1635,8 @@ NPY_NO_EXPORT void
/**end repeat**/
/**begin repeat
- * #func = exp, log#
- * #scalarf = npy_expf, npy_logf#
+ * #func = sin, cos, exp, log#
+ * #scalarf = npy_sinf, npy_cosf, npy_expf, npy_logf#
*/
NPY_NO_EXPORT NPY_GCC_OPT_3 void
@@ -1610,8 +1651,8 @@ FLOAT_@func@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSE
/**end repeat**/
/**begin repeat
- * #isa = avx512f, avx2#
- * #ISA = AVX512F, AVX2#
+ * #isa = avx512f, fma#
+ * #ISA = AVX512F, FMA#
* #CHK = HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS, HAVE_ATTRIBUTE_TARGET_AVX2_WITH_INTRINSICS#
*/
@@ -1642,6 +1683,31 @@ FLOAT_@func@_@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY
}
/**end repeat1**/
+
+/**begin repeat1
+ * #func = cos, sin#
+ * #enum = npy_compute_cos, npy_compute_sin#
+ * #scalarf = npy_cosf, npy_sinf#
+ */
+
+NPY_NO_EXPORT NPY_GCC_OPT_3 void
+FLOAT_@func@_@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data))
+{
+ if (!run_unary_@isa@_sincos_FLOAT(args, dimensions, steps, @enum@)) {
+ UNARY_LOOP {
+#if defined @CHK@ && defined NPY_HAVE_SSE2_INTRINSICS
+ @ISA@_sincos_FLOAT((npy_float *)op1, (npy_float *)ip1, 1, steps[0], @enum@);
+#else
+ const npy_float in1 = *(npy_float *)ip1;
+ *(npy_float *)op1 = @scalarf@(in1);
+#endif
+ }
+ }
+}
+
+/**end repeat1**/
+
+
/**end repeat**/
/**begin repeat
diff --git a/numpy/core/src/umath/loops.h.src b/numpy/core/src/umath/loops.h.src
index 7f05a693a..5070ab38b 100644
--- a/numpy/core/src/umath/loops.h.src
+++ b/numpy/core/src/umath/loops.h.src
@@ -178,13 +178,13 @@ NPY_NO_EXPORT void
/**end repeat**/
/**begin repeat
- * #func = exp, log#
+ * #func = sin, cos, exp, log#
*/
NPY_NO_EXPORT void
FLOAT_@func@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
/**begin repeat1
- * #isa = avx512f, avx2#
+ * #isa = avx512f, fma#
*/
NPY_NO_EXPORT void
diff --git a/numpy/core/src/umath/matmul.c.src b/numpy/core/src/umath/matmul.c.src
index 480c0c72f..b5204eca5 100644
--- a/numpy/core/src/umath/matmul.c.src
+++ b/numpy/core/src/umath/matmul.c.src
@@ -196,16 +196,14 @@ NPY_NO_EXPORT void
* FLOAT, DOUBLE, HALF,
* CFLOAT, CDOUBLE, CLONGDOUBLE,
* UBYTE, USHORT, UINT, ULONG, ULONGLONG,
- * BYTE, SHORT, INT, LONG, LONGLONG,
- * BOOL#
+ * BYTE, SHORT, INT, LONG, LONGLONG#
* #typ = npy_longdouble,
* npy_float,npy_double,npy_half,
* npy_cfloat, npy_cdouble, npy_clongdouble,
* npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong,
- * npy_byte, npy_short, npy_int, npy_long, npy_longlong,
- * npy_bool#
- * #IS_COMPLEX = 0, 0, 0, 0, 1, 1, 1, 0*11#
- * #IS_HALF = 0, 0, 0, 1, 0*14#
+ * npy_byte, npy_short, npy_int, npy_long, npy_longlong#
+ * #IS_COMPLEX = 0, 0, 0, 0, 1, 1, 1, 0*10#
+ * #IS_HALF = 0, 0, 0, 1, 0*13#
*/
NPY_NO_EXPORT void
@@ -266,7 +264,44 @@ NPY_NO_EXPORT void
}
/**end repeat**/
+NPY_NO_EXPORT void
+BOOL_matmul_inner_noblas(void *_ip1, npy_intp is1_m, npy_intp is1_n,
+ void *_ip2, npy_intp is2_n, npy_intp is2_p,
+ void *_op, npy_intp os_m, npy_intp os_p,
+ npy_intp dm, npy_intp dn, npy_intp dp)
+
+{
+ npy_intp m, n, p;
+ npy_intp ib2_p, ob_p;
+ char *ip1 = (char *)_ip1, *ip2 = (char *)_ip2, *op = (char *)_op;
+ ib2_p = is2_p * dp;
+ ob_p = os_p * dp;
+
+ for (m = 0; m < dm; m++) {
+ for (p = 0; p < dp; p++) {
+ char *ip1tmp = ip1;
+ char *ip2tmp = ip2;
+ *(npy_bool *)op = NPY_FALSE;
+ for (n = 0; n < dn; n++) {
+ npy_bool val1 = (*(npy_bool *)ip1tmp);
+ npy_bool val2 = (*(npy_bool *)ip2tmp);
+ if (val1 != 0 && val2 != 0) {
+ *(npy_bool *)op = NPY_TRUE;
+ break;
+ }
+ ip2tmp += is2_n;
+ ip1tmp += is1_n;
+ }
+ op += os_p;
+ ip2 += is2_p;
+ }
+ op -= ob_p;
+ ip2 -= ib2_p;
+ ip1 += is1_m;
+ op += os_m;
+ }
+}
NPY_NO_EXPORT void
OBJECT_matmul_inner_noblas(void *_ip1, npy_intp is1_m, npy_intp is1_n,
diff --git a/numpy/core/src/umath/reduction.c b/numpy/core/src/umath/reduction.c
index 8ae2f65e0..4ce8d8ab7 100644
--- a/numpy/core/src/umath/reduction.c
+++ b/numpy/core/src/umath/reduction.c
@@ -36,7 +36,7 @@
* If 'dtype' isn't NULL, this function steals its reference.
*/
static PyArrayObject *
-allocate_reduce_result(PyArrayObject *arr, npy_bool *axis_flags,
+allocate_reduce_result(PyArrayObject *arr, const npy_bool *axis_flags,
PyArray_Descr *dtype, int subok)
{
npy_intp strides[NPY_MAXDIMS], stride;
@@ -84,7 +84,7 @@ allocate_reduce_result(PyArrayObject *arr, npy_bool *axis_flags,
* The return value is a view into 'out'.
*/
static PyArrayObject *
-conform_reduce_result(int ndim, npy_bool *axis_flags,
+conform_reduce_result(int ndim, const npy_bool *axis_flags,
PyArrayObject *out, int keepdims, const char *funcname,
int need_copy)
{
@@ -251,7 +251,7 @@ PyArray_CreateReduceResult(PyArrayObject *operand, PyArrayObject *out,
* Count the number of dimensions selected in 'axis_flags'
*/
static int
-count_axes(int ndim, npy_bool *axis_flags)
+count_axes(int ndim, const npy_bool *axis_flags)
{
int idim;
int naxes = 0;
@@ -299,7 +299,7 @@ count_axes(int ndim, npy_bool *axis_flags)
NPY_NO_EXPORT PyArrayObject *
PyArray_InitializeReduceResult(
PyArrayObject *result, PyArrayObject *operand,
- npy_bool *axis_flags,
+ const npy_bool *axis_flags,
npy_intp *out_skip_first_count, const char *funcname)
{
npy_intp *strides, *shape, shape_orig[NPY_MAXDIMS];
diff --git a/numpy/core/src/umath/scalarmath.c.src b/numpy/core/src/umath/scalarmath.c.src
index a7987acda..9cce0b7f0 100644
--- a/numpy/core/src/umath/scalarmath.c.src
+++ b/numpy/core/src/umath/scalarmath.c.src
@@ -246,25 +246,26 @@ static void
/**end repeat**/
-
-/* QUESTION: Should we check for overflow / underflow in (l,r)shift? */
-
/**begin repeat
* #name = byte, ubyte, short, ushort, int, uint,
* long, ulong, longlong, ulonglong#
* #type = npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint,
* npy_long, npy_ulong, npy_longlong, npy_ulonglong#
+ * #suffix = hh,uhh,h,uh,,u,l,ul,ll,ull#
*/
/**begin repeat1
- * #oper = and, xor, or, lshift, rshift#
- * #op = &, ^, |, <<, >>#
+ * #oper = and, xor, or#
+ * #op = &, ^, |#
*/
#define @name@_ctype_@oper@(arg1, arg2, out) *(out) = (arg1) @op@ (arg2)
/**end repeat1**/
+#define @name@_ctype_lshift(arg1, arg2, out) *(out) = npy_lshift@suffix@(arg1, arg2)
+#define @name@_ctype_rshift(arg1, arg2, out) *(out) = npy_rshift@suffix@(arg1, arg2)
+
/**end repeat**/
/**begin repeat
@@ -570,7 +571,7 @@ static void
* 1) Convert the types to the common type if both are scalars (0 return)
* 2) If both are not scalars use ufunc machinery (-2 return)
* 3) If both are scalars but cannot be cast to the right type
- * return NotImplmented (-1 return)
+ * return NotImplemented (-1 return)
*
* 4) Perform the function on the C-type.
* 5) If an error condition occurred, check to see
@@ -1429,24 +1430,53 @@ static PyObject *
/**begin repeat
*
+ * #name = byte, ubyte, short, ushort, int, uint,
+ * long, ulong, longlong, ulonglong,
+ * half, float, double, longdouble,
+ * cfloat, cdouble, clongdouble#
+ * #Name = Byte, UByte, Short, UShort, Int, UInt,
+ * Long, ULong, LongLong, ULongLong,
+ * Half, Float, Double, LongDouble,
+ * CFloat, CDouble, CLongDouble#
+ * #cmplx = 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1#
+ * #to_ctype = , , , , , , , , , , npy_half_to_double, , , , , , #
+ * #func = PyFloat_FromDouble*17#
+ */
+static NPY_INLINE PyObject *
+@name@_float(PyObject *obj)
+{
+#if @cmplx@
+ if (emit_complexwarning() < 0) {
+ return NULL;
+ }
+ return @func@(@to_ctype@(PyArrayScalar_VAL(obj, @Name@).real));
+#else
+ return @func@(@to_ctype@(PyArrayScalar_VAL(obj, @Name@)));
+#endif
+}
+/**end repeat**/
+
+
+#if !defined(NPY_PY3K)
+
+/**begin repeat
+ *
* #name = (byte, ubyte, short, ushort, int, uint,
* long, ulong, longlong, ulonglong,
* half, float, double, longdouble,
- * cfloat, cdouble, clongdouble)*2#
+ * cfloat, cdouble, clongdouble)#
* #Name = (Byte, UByte, Short, UShort, Int, UInt,
* Long, ULong, LongLong, ULongLong,
* Half, Float, Double, LongDouble,
- * CFloat, CDouble, CLongDouble)*2#
- * #cmplx = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1)*2#
- * #to_ctype = (, , , , , , , , , , npy_half_to_double, , , , , , )*2#
- * #which = long*17, float*17#
+ * CFloat, CDouble, CLongDouble)#
+ * #cmplx = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1)#
+ * #to_ctype = (, , , , , , , , , , npy_half_to_double, , , , , , )#
* #func = (PyLong_FromLongLong, PyLong_FromUnsignedLongLong)*5,
* PyLong_FromDouble*3, npy_longdouble_to_PyLong,
- * PyLong_FromDouble*2, npy_longdouble_to_PyLong,
- * PyFloat_FromDouble*17#
+ * PyLong_FromDouble*2, npy_longdouble_to_PyLong#
*/
static NPY_INLINE PyObject *
-@name@_@which@(PyObject *obj)
+@name@_long(PyObject *obj)
{
#if @cmplx@
if (emit_complexwarning() < 0) {
@@ -1459,8 +1489,6 @@ static NPY_INLINE PyObject *
}
/**end repeat**/
-#if !defined(NPY_PY3K)
-
/**begin repeat
*
* #name = (byte, ubyte, short, ushort, int, uint,
diff --git a/numpy/core/src/umath/simd.inc.src b/numpy/core/src/umath/simd.inc.src
index ecf2a7951..88e5e1f1b 100644
--- a/numpy/core/src/umath/simd.inc.src
+++ b/numpy/core/src/umath/simd.inc.src
@@ -130,8 +130,9 @@ abs_ptrdiff(char *a, char *b)
*/
/**begin repeat
- * #ISA = AVX2, AVX512F#
- * #isa = avx2, avx512f#
+ * #ISA = FMA, AVX512F#
+ * #isa = fma, avx512f#
+ * #CHK = HAVE_ATTRIBUTE_TARGET_AVX2_WITH_INTRINSICS, HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS#
* #REGISTER_SIZE = 32, 64#
*/
@@ -141,7 +142,7 @@ abs_ptrdiff(char *a, char *b)
* #func = exp, log#
*/
-#if defined HAVE_ATTRIBUTE_TARGET_@ISA@_WITH_INTRINSICS && defined NPY_HAVE_SSE2_INTRINSICS
+#if defined @CHK@ && defined NPY_HAVE_SSE2_INTRINSICS
static NPY_INLINE void
@ISA@_@func@_FLOAT(npy_float *, npy_float *, const npy_intp n, const npy_intp stride);
#endif
@@ -149,7 +150,7 @@ static NPY_INLINE void
static NPY_INLINE int
run_unary_@isa@_@func@_FLOAT(char **args, npy_intp *dimensions, npy_intp *steps)
{
-#if defined HAVE_ATTRIBUTE_TARGET_@ISA@_WITH_INTRINSICS && defined NPY_HAVE_SSE2_INTRINSICS
+#if defined @CHK@ && defined NPY_HAVE_SSE2_INTRINSICS
if (IS_OUTPUT_BLOCKABLE_UNARY(sizeof(npy_float), @REGISTER_SIZE@)) {
@ISA@_@func@_FLOAT((npy_float*)args[1], (npy_float*)args[0], dimensions[0], steps[0]);
return 1;
@@ -162,6 +163,25 @@ run_unary_@isa@_@func@_FLOAT(char **args, npy_intp *dimensions, npy_intp *steps)
/**end repeat1**/
+#if defined @CHK@ && defined NPY_HAVE_SSE2_INTRINSICS
+static NPY_INLINE void
+@ISA@_sincos_FLOAT(npy_float *, npy_float *, const npy_intp n, const npy_intp steps, NPY_TRIG_OP);
+#endif
+
+static NPY_INLINE int
+run_unary_@isa@_sincos_FLOAT(char **args, npy_intp *dimensions, npy_intp *steps, NPY_TRIG_OP my_trig_op)
+{
+#if defined @CHK@ && defined NPY_HAVE_SSE2_INTRINSICS
+ if (IS_OUTPUT_BLOCKABLE_UNARY(sizeof(npy_float), @REGISTER_SIZE@)) {
+ @ISA@_sincos_FLOAT((npy_float*)args[1], (npy_float*)args[0], dimensions[0], steps[0], my_trig_op);
+ return 1;
+ }
+ else
+ return 0;
+#endif
+ return 0;
+}
+
/**end repeat**/
@@ -997,7 +1017,7 @@ sse2_sqrt_@TYPE@(@type@ * op, @type@ * ip, const npy_intp n)
LOOP_BLOCK_ALIGN_VAR(op, @type@, VECTOR_SIZE_BYTES) {
op[i] = @scalarf@(ip[i]);
}
- assert(n < (VECTOR_SIZE_BYTES / sizeof(@type@)) ||
+ assert((npy_uintp)n < (VECTOR_SIZE_BYTES / sizeof(@type@)) ||
npy_is_aligned(&op[i], VECTOR_SIZE_BYTES));
if (npy_is_aligned(&ip[i], VECTOR_SIZE_BYTES)) {
LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) {
@@ -1049,7 +1069,7 @@ sse2_@kind@_@TYPE@(@type@ * op, @type@ * ip, const npy_intp n)
LOOP_BLOCK_ALIGN_VAR(op, @type@, VECTOR_SIZE_BYTES) {
op[i] = @scalar@_@type@(ip[i]);
}
- assert(n < (VECTOR_SIZE_BYTES / sizeof(@type@)) ||
+ assert((npy_uintp)n < (VECTOR_SIZE_BYTES / sizeof(@type@)) ||
npy_is_aligned(&op[i], VECTOR_SIZE_BYTES));
if (npy_is_aligned(&ip[i], VECTOR_SIZE_BYTES)) {
LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) {
@@ -1084,7 +1104,7 @@ sse2_@kind@_@TYPE@(@type@ * ip, @type@ * op, const npy_intp n)
/* Order of operations important for MSVC 2015 */
*op = (*op @OP@ ip[i] || npy_isnan(*op)) ? *op : ip[i];
}
- assert(n < (stride) || npy_is_aligned(&ip[i], VECTOR_SIZE_BYTES));
+ assert((npy_uintp)n < (stride) || npy_is_aligned(&ip[i], VECTOR_SIZE_BYTES));
if (i + 3 * stride <= n) {
/* load the first elements */
@vtype@ c1 = @vpre@_load_@vsuf@((@type@*)&ip[i]);
@@ -1123,20 +1143,14 @@ sse2_@kind@_@TYPE@(@type@ * ip, @type@ * op, const npy_intp n)
/* bunch of helper functions used in ISA_exp/log_FLOAT*/
#if defined HAVE_ATTRIBUTE_TARGET_AVX2_WITH_INTRINSICS
-static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX2 __m256
-avx2_fmadd(__m256 a, __m256 b, __m256 c)
-{
- return _mm256_add_ps(_mm256_mul_ps(a, b), c);
-}
-
-static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX2 __m256
-avx2_get_full_load_mask(void)
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256
+fma_get_full_load_mask(void)
{
return _mm256_set1_ps(-1.0);
}
-static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX2 __m256
-avx2_get_partial_load_mask(const npy_int num_lanes, const npy_int total_elem)
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256
+fma_get_partial_load_mask(const npy_int num_lanes, const npy_int total_elem)
{
float maskint[16] = {-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,
1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0};
@@ -1144,8 +1158,8 @@ avx2_get_partial_load_mask(const npy_int num_lanes, const npy_int total_elem)
return _mm256_loadu_ps(addr);
}
-static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX2 __m256
-avx2_masked_gather(__m256 src,
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256
+fma_masked_gather(__m256 src,
npy_float* addr,
__m256i vindex,
__m256 mask)
@@ -1153,26 +1167,39 @@ avx2_masked_gather(__m256 src,
return _mm256_mask_i32gather_ps(src, addr, vindex, mask, 4);
}
-static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX2 __m256
-avx2_masked_load(__m256 mask, npy_float* addr)
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256
+fma_masked_load(__m256 mask, npy_float* addr)
{
return _mm256_maskload_ps(addr, _mm256_cvtps_epi32(mask));
}
-static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX2 __m256
-avx2_set_masked_lanes(__m256 x, __m256 val, __m256 mask)
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256
+fma_set_masked_lanes(__m256 x, __m256 val, __m256 mask)
{
return _mm256_blendv_ps(x, val, mask);
}
-static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX2 __m256
-avx2_blend(__m256 x, __m256 y, __m256 ymask)
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256
+fma_blend(__m256 x, __m256 y, __m256 ymask)
{
return _mm256_blendv_ps(x, y, ymask);
}
-static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX2 __m256
-avx2_get_exponent(__m256 x)
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256
+fma_should_calculate_sine(__m256i k, __m256i andop, __m256i cmp)
+{
+ return _mm256_cvtepi32_ps(
+ _mm256_cmpeq_epi32(_mm256_and_si256(k, andop), cmp));
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256
+fma_should_negate(__m256i k, __m256i andop, __m256i cmp)
+{
+ return fma_should_calculate_sine(k, andop, cmp);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256
+fma_get_exponent(__m256 x)
{
/*
* Special handling of denormals:
@@ -1198,8 +1225,8 @@ avx2_get_exponent(__m256 x)
return _mm256_blendv_ps(exp, denorm_exp, denormal_mask);
}
-static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX2 __m256
-avx2_get_mantissa(__m256 x)
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256
+fma_get_mantissa(__m256 x)
{
/*
* Special handling of denormals:
@@ -1223,6 +1250,46 @@ avx2_get_mantissa(__m256 x)
_mm256_and_si256(
_mm256_castps_si256(x), mantissa_bits), exp_126_bits));
}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX2 __m256
+fma_scalef_ps(__m256 poly, __m256 quadrant)
+{
+ /*
+ * Handle denormals (which occur when quadrant <= -125):
+ * 1) This function computes poly*(2^quad) by adding the exponent of
+ poly to quad
+ * 2) When quad <= -125, the output is a denormal and the above logic
+ breaks down
+ * 3) To handle such cases, we split quadrant: -125 + (quadrant + 125)
+ * 4) poly*(2^-125) is computed the usual way
+ * 5) 2^(quad-125) can be computed by: 2 << abs(quad-125)
+ * 6) The final div operation generates the denormal
+ */
+ __m256 minquadrant = _mm256_set1_ps(-125.0f);
+ __m256 denormal_mask = _mm256_cmp_ps(quadrant, minquadrant, _CMP_LE_OQ);
+ if (_mm256_movemask_ps(denormal_mask) != 0x0000) {
+ __m256 quad_diff = _mm256_sub_ps(quadrant, minquadrant);
+ quad_diff = _mm256_sub_ps(_mm256_setzero_ps(), quad_diff);
+ quad_diff = _mm256_blendv_ps(_mm256_setzero_ps(), quad_diff, denormal_mask);
+ __m256i two_power_diff = _mm256_sllv_epi32(
+ _mm256_set1_epi32(1), _mm256_cvtps_epi32(quad_diff));
+ quadrant = _mm256_max_ps(quadrant, minquadrant); //keep quadrant >= -126
+ __m256i exponent = _mm256_slli_epi32(_mm256_cvtps_epi32(quadrant), 23);
+ poly = _mm256_castsi256_ps(
+ _mm256_add_epi32(
+ _mm256_castps_si256(poly), exponent));
+ __m256 denorm_poly = _mm256_div_ps(poly, _mm256_cvtepi32_ps(two_power_diff));
+ return _mm256_blendv_ps(poly, denorm_poly, denormal_mask);
+ }
+ else {
+ __m256i exponent = _mm256_slli_epi32(_mm256_cvtps_epi32(quadrant), 23);
+ poly = _mm256_castsi256_ps(
+ _mm256_add_epi32(
+ _mm256_castps_si256(poly), exponent));
+ return poly;
+ }
+}
+
#endif
#if defined HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS
@@ -1265,6 +1332,18 @@ avx512_blend(__m512 x, __m512 y, __mmask16 ymask)
return _mm512_mask_mov_ps(x, ymask, y);
}
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __mmask16
+avx512_should_calculate_sine(__m512i k, __m512i andop, __m512i cmp)
+{
+ return _mm512_cmpeq_epi32_mask(_mm512_and_epi32(k, andop), cmp);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __mmask16
+avx512_should_negate(__m512i k, __m512i andop, __m512i cmp)
+{
+ return avx512_should_calculate_sine(k, andop, cmp);
+}
+
static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __m512
avx512_get_exponent(__m512 x)
{
@@ -1276,20 +1355,37 @@ avx512_get_mantissa(__m512 x)
{
return _mm512_getmant_ps(x, _MM_MANT_NORM_p5_1, _MM_MANT_SIGN_src);
}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __m512
+avx512_scalef_ps(__m512 poly, __m512 quadrant)
+{
+ return _mm512_scalef_ps(poly, quadrant);
+}
#endif
/**begin repeat
- * #ISA = AVX2, AVX512F#
- * #isa = avx2, avx512#
+ * #ISA = FMA, AVX512F#
+ * #isa = fma, avx512#
* #vtype = __m256, __m512#
* #vsize = 256, 512#
* #or = or_ps, kor#
* #vsub = , _mask#
* #mask = __m256, __mmask16#
- * #fmadd = avx2_fmadd,_mm512_fmadd_ps#
+ * #fmadd = _mm256_fmadd_ps, _mm512_fmadd_ps#
+ * #CHK = HAVE_ATTRIBUTE_TARGET_AVX2_WITH_INTRINSICS, HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS#
**/
-#if defined HAVE_ATTRIBUTE_TARGET_@ISA@_WITH_INTRINSICS
+#if defined @CHK@
+
+/*
+ * Vectorized Cody-Waite range reduction technique
+ * Performs the reduction step x* = x - y*C in three steps:
+ * 1) x* = x - y*c1
+ * 2) x* = x - y*c2
+ * 3) x* = x - y*c3
+ * c1, c2 are exact floating points, c3 = C - c1 - c2 simulates higher precision
+ */
+
static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ @vtype@
@isa@_range_reduction(@vtype@ x, @vtype@ y, @vtype@ c1, @vtype@ c2, @vtype@ c3)
{
@@ -1298,12 +1394,56 @@ static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ @vtype@
reduced_x = @fmadd@(y, c3, reduced_x);
return reduced_x;
}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ @mask@
+@isa@_in_range_mask(@vtype@ x, npy_float fmax, npy_float fmin)
+{
+ @mask@ m1 = _mm@vsize@_cmp_ps@vsub@(
+ x, _mm@vsize@_set1_ps(fmax), _CMP_GT_OQ);
+ @mask@ m2 = _mm@vsize@_cmp_ps@vsub@(
+ x, _mm@vsize@_set1_ps(fmin), _CMP_LT_OQ);
+ return _mm@vsize@_@or@(m1,m2);
+}
+
+/*
+ * Approximate cosine algorithm for x \in [-PI/4, PI/4]
+ * Maximum ULP across all 32-bit floats = 0.875
+ */
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ @vtype@
+@isa@_cosine(@vtype@ x2, @vtype@ invf8, @vtype@ invf6, @vtype@ invf4,
+ @vtype@ invf2, @vtype@ invf0)
+{
+ @vtype@ cos = @fmadd@(invf8, x2, invf6);
+ cos = @fmadd@(cos, x2, invf4);
+ cos = @fmadd@(cos, x2, invf2);
+ cos = @fmadd@(cos, x2, invf0);
+ return cos;
+}
+
+/*
+ * Approximate sine algorithm for x \in [-PI/4, PI/4]
+ * Maximum ULP across all 32-bit floats = 0.647
+ */
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ @vtype@
+@isa@_sine(@vtype@ x, @vtype@ x2, @vtype@ invf9, @vtype@ invf7,
+ @vtype@ invf5, @vtype@ invf3,
+ @vtype@ zero)
+{
+ @vtype@ sin = @fmadd@(invf9, x2, invf7);
+ sin = @fmadd@(sin, x2, invf5);
+ sin = @fmadd@(sin, x2, invf3);
+ sin = @fmadd@(sin, x2, zero);
+ sin = @fmadd@(sin, x, x);
+ return sin;
+}
#endif
/**end repeat**/
/**begin repeat
- * #ISA = AVX2, AVX512F#
- * #isa = avx2, avx512#
+ * #ISA = FMA, AVX512F#
+ * #isa = fma, avx512#
* #vtype = __m256, __m512#
* #vsize = 256, 512#
* #BYTES = 32, 64#
@@ -1312,13 +1452,165 @@ static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ @vtype@
* #or_masks =_mm256_or_ps, _mm512_kor#
* #and_masks =_mm256_and_ps, _mm512_kand#
* #xor_masks =_mm256_xor_ps, _mm512_kxor#
- * #fmadd = avx2_fmadd,_mm512_fmadd_ps#
+ * #fmadd = _mm256_fmadd_ps, _mm512_fmadd_ps#
* #mask_to_int = _mm256_movemask_ps, #
* #full_mask= 0xFF, 0xFFFF#
* #masked_store = _mm256_maskstore_ps, _mm512_mask_storeu_ps#
* #cvtps_epi32 = _mm256_cvtps_epi32, #
+ * #CHK = HAVE_ATTRIBUTE_TARGET_AVX2_WITH_INTRINSICS, HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS#
+ */
+
+
+/*
+ * Vectorized approximate sine/cosine algorithms: The following code is a
+ * vectorized version of the algorithm presented here:
+ * https://stackoverflow.com/questions/30463616/payne-hanek-algorithm-implementation-in-c/30465751#30465751
+ * (1) Load data in ZMM/YMM registers and generate mask for elements that are
+ * within range [-71476.0625f, 71476.0625f] for cosine and [-117435.992f,
+ * 117435.992f] for sine.
+ * (2) For elements within range, perform range reduction using Cody-Waite's
+ * method: x* = x - y*PI/2, where y = rint(x*2/PI). x* \in [-PI/4, PI/4].
+ * (3) Map cos(x) to (+/-)sine or (+/-)cosine of x* based on the quadrant k =
+ * int(y).
+ * (4) For elements outside that range, Cody-Waite reduction performs poorly
+ * leading to catastrophic cancellation. We compute cosine by calling glibc in
+ * a scalar fashion.
+ * (5) Vectorized implementation has a max ULP of 1.49 and performs at least
+ * 5-7x faster than scalar implementations when magnitude of all elements in
+ * the array < 71476.0625f (117435.992f for sine). Worst case performance is
+ * when all the elements are large leading to about 1-2% reduction in
+ * performance.
*/
+#if defined @CHK@
+static NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void
+@ISA@_sincos_FLOAT(npy_float * op,
+ npy_float * ip,
+ const npy_intp array_size,
+ const npy_intp steps,
+ NPY_TRIG_OP my_trig_op)
+{
+ const npy_intp stride = steps/sizeof(npy_float);
+ const npy_int num_lanes = @BYTES@/sizeof(npy_float);
+ npy_float large_number = 71476.0625f;
+ if (my_trig_op == npy_compute_sin) {
+ large_number = 117435.992f;
+ }
+
+ /* Load up frequently used constants */
+ @vtype@i zeros = _mm@vsize@_set1_epi32(0);
+ @vtype@i ones = _mm@vsize@_set1_epi32(1);
+ @vtype@i twos = _mm@vsize@_set1_epi32(2);
+ @vtype@ two_over_pi = _mm@vsize@_set1_ps(NPY_TWO_O_PIf);
+ @vtype@ codyw_c1 = _mm@vsize@_set1_ps(NPY_CODY_WAITE_PI_O_2_HIGHf);
+ @vtype@ codyw_c2 = _mm@vsize@_set1_ps(NPY_CODY_WAITE_PI_O_2_MEDf);
+ @vtype@ codyw_c3 = _mm@vsize@_set1_ps(NPY_CODY_WAITE_PI_O_2_LOWf);
+ @vtype@ cos_invf0 = _mm@vsize@_set1_ps(NPY_COEFF_INVF0_COSINEf);
+ @vtype@ cos_invf2 = _mm@vsize@_set1_ps(NPY_COEFF_INVF2_COSINEf);
+ @vtype@ cos_invf4 = _mm@vsize@_set1_ps(NPY_COEFF_INVF4_COSINEf);
+ @vtype@ cos_invf6 = _mm@vsize@_set1_ps(NPY_COEFF_INVF6_COSINEf);
+ @vtype@ cos_invf8 = _mm@vsize@_set1_ps(NPY_COEFF_INVF8_COSINEf);
+ @vtype@ sin_invf3 = _mm@vsize@_set1_ps(NPY_COEFF_INVF3_SINEf);
+ @vtype@ sin_invf5 = _mm@vsize@_set1_ps(NPY_COEFF_INVF5_SINEf);
+ @vtype@ sin_invf7 = _mm@vsize@_set1_ps(NPY_COEFF_INVF7_SINEf);
+ @vtype@ sin_invf9 = _mm@vsize@_set1_ps(NPY_COEFF_INVF9_SINEf);
+ @vtype@ cvt_magic = _mm@vsize@_set1_ps(NPY_RINT_CVT_MAGICf);
+ @vtype@ zero_f = _mm@vsize@_set1_ps(0.0f);
+ @vtype@ quadrant, reduced_x, reduced_x2, cos, sin;
+ @vtype@i iquadrant;
+ @mask@ nan_mask, glibc_mask, sine_mask, negate_mask;
+ @mask@ load_mask = @isa@_get_full_load_mask();
+ npy_intp num_remaining_elements = array_size;
+ npy_int indexarr[16];
+ for (npy_int ii = 0; ii < 16; ii++) {
+ indexarr[ii] = ii*stride;
+ }
+ @vtype@i vindex = _mm@vsize@_loadu_si@vsize@((@vtype@i*)&indexarr[0]);
+
+ while (num_remaining_elements > 0) {
+
+ if (num_remaining_elements < num_lanes) {
+ load_mask = @isa@_get_partial_load_mask(num_remaining_elements,
+ num_lanes);
+ }
+
+ @vtype@ x;
+ if (stride == 1) {
+ x = @isa@_masked_load(load_mask, ip);
+ }
+ else {
+ x = @isa@_masked_gather(zero_f, ip, vindex, load_mask);
+ }
+
+ /*
+ * For elements outside of this range, Cody-Waite's range reduction
+ * becomes inaccurate and we will call glibc to compute cosine for
+ * these numbers
+ */
+
+ glibc_mask = @isa@_in_range_mask(x, large_number,-large_number);
+ glibc_mask = @and_masks@(load_mask, glibc_mask);
+ nan_mask = _mm@vsize@_cmp_ps@vsub@(x, x, _CMP_NEQ_UQ);
+ x = @isa@_set_masked_lanes(x, zero_f, @or_masks@(nan_mask, glibc_mask));
+ npy_int iglibc_mask = @mask_to_int@(glibc_mask);
+
+ if (iglibc_mask != @full_mask@) {
+ quadrant = _mm@vsize@_mul_ps(x, two_over_pi);
+
+ /* round to nearest */
+ quadrant = _mm@vsize@_add_ps(quadrant, cvt_magic);
+ quadrant = _mm@vsize@_sub_ps(quadrant, cvt_magic);
+
+ /* Cody-Waite's range reduction algorithm */
+ reduced_x = @isa@_range_reduction(x, quadrant,
+ codyw_c1, codyw_c2, codyw_c3);
+ reduced_x2 = _mm@vsize@_mul_ps(reduced_x, reduced_x);
+
+ /* compute cosine and sine */
+ cos = @isa@_cosine(reduced_x2, cos_invf8, cos_invf6, cos_invf4,
+ cos_invf2, cos_invf0);
+ sin = @isa@_sine(reduced_x, reduced_x2, sin_invf9, sin_invf7,
+ sin_invf5, sin_invf3, zero_f);
+
+ iquadrant = _mm@vsize@_cvtps_epi32(quadrant);
+ if (my_trig_op == npy_compute_cos) {
+ iquadrant = _mm@vsize@_add_epi32(iquadrant, ones);
+ }
+
+ /* blend sin and cos based on the quadrant */
+ sine_mask = @isa@_should_calculate_sine(iquadrant, ones, zeros);
+ cos = @isa@_blend(cos, sin, sine_mask);
+
+ /* multiply by -1 for appropriate elements */
+ negate_mask = @isa@_should_negate(iquadrant, twos, twos);
+ cos = @isa@_blend(cos, _mm@vsize@_sub_ps(zero_f, cos), negate_mask);
+ cos = @isa@_set_masked_lanes(cos, _mm@vsize@_set1_ps(NPY_NANF), nan_mask);
+
+ @masked_store@(op, @cvtps_epi32@(load_mask), cos);
+ }
+
+ /* process elements using glibc for large elements */
+ if (my_trig_op == npy_compute_cos) {
+ for (int ii = 0; iglibc_mask != 0; ii++) {
+ if (iglibc_mask & 0x01) {
+ op[ii] = npy_cosf(ip[ii]);
+ }
+ iglibc_mask = iglibc_mask >> 1;
+ }
+ }
+ else {
+ for (int ii = 0; iglibc_mask != 0; ii++) {
+ if (iglibc_mask & 0x01) {
+ op[ii] = npy_sinf(ip[ii]);
+ }
+ iglibc_mask = iglibc_mask >> 1;
+ }
+ }
+ ip += num_lanes*stride;
+ op += num_lanes;
+ num_remaining_elements -= num_lanes;
+ }
+}
/*
* Vectorized implementation of exp using AVX2 and AVX512:
@@ -1335,7 +1627,6 @@ static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ @vtype@
* same x = 0xc2781e37)
*/
-#if defined HAVE_ATTRIBUTE_TARGET_@ISA@_WITH_INTRINSICS
static NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void
@ISA@_exp_FLOAT(npy_float * op,
npy_float * ip,
@@ -1345,7 +1636,7 @@ static NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void
const npy_intp stride = steps/sizeof(npy_float);
const npy_int num_lanes = @BYTES@/sizeof(npy_float);
npy_float xmax = 88.72283935546875f;
- npy_float xmin = -87.3365478515625f;
+ npy_float xmin = -103.97208404541015625f;
npy_int indexarr[16];
for (npy_int ii = 0; ii < 16; ii++) {
indexarr[ii] = ii*stride;
@@ -1369,7 +1660,6 @@ static NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void
@vtype@ zeros_f = _mm@vsize@_set1_ps(0.0f);
@vtype@ poly, num_poly, denom_poly, quadrant;
@vtype@i vindex = _mm@vsize@_loadu_si@vsize@((@vtype@i*)&indexarr[0]);
- @vtype@i exponent;
@mask@ xmax_mask, xmin_mask, nan_mask, inf_mask;
@mask@ overflow_mask = @isa@_get_partial_load_mask(0, num_lanes);
@@ -1426,10 +1716,7 @@ static NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void
* exponent of quadrant to the exponent of poly. quadrant is an int,
* so extracting exponent is simply extracting 8 bits.
*/
- exponent = _mm@vsize@_slli_epi32(_mm@vsize@_cvtps_epi32(quadrant), 23);
- poly = _mm@vsize@_castsi@vsize@_ps(
- _mm@vsize@_add_epi32(
- _mm@vsize@_castps_si@vsize@(poly), exponent));
+ poly = @isa@_scalef_ps(poly, quadrant);
/*
* elem > xmax; return inf
@@ -1494,6 +1781,7 @@ static NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void
@vtype@ log_q5 = _mm@vsize@_set1_ps(NPY_COEFF_Q5_LOGf);
@vtype@ loge2 = _mm@vsize@_set1_ps(NPY_LOGE2f);
@vtype@ nan = _mm@vsize@_set1_ps(NPY_NANF);
+ @vtype@ neg_nan = _mm@vsize@_set1_ps(-NPY_NANF);
@vtype@ neg_inf = _mm@vsize@_set1_ps(-NPY_INFINITYF);
@vtype@ inf = _mm@vsize@_set1_ps(NPY_INFINITYF);
@vtype@ zeros_f = _mm@vsize@_set1_ps(0.0f);
@@ -1560,11 +1848,12 @@ static NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void
poly = @fmadd@(exponent, loge2, poly);
/*
- * x < 0.0f; return NAN
+ * x < 0.0f; return -NAN
* x = +/- NAN; return NAN
* x = 0.0f; return -INF
*/
- poly = @isa@_set_masked_lanes(poly, nan, @or_masks@(negx_mask, nan_mask));
+ poly = @isa@_set_masked_lanes(poly, nan, nan_mask);
+ poly = @isa@_set_masked_lanes(poly, neg_nan, negx_mask);
poly = @isa@_set_masked_lanes(poly, neg_inf, zero_mask);
poly = @isa@_set_masked_lanes(poly, inf, inf_mask);
diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c
index 5f9a0f7f4..c36680ed2 100644
--- a/numpy/core/src/umath/ufunc_object.c
+++ b/numpy/core/src/umath/ufunc_object.c
@@ -908,7 +908,7 @@ parse_ufunc_keywords(PyUFuncObject *ufunc, PyObject *kwds, PyObject **kwnames, .
typedef int converter(PyObject *, void *);
while (PyDict_Next(kwds, &pos, &key, &value)) {
- int i;
+ npy_intp i;
converter *convert;
void *output = NULL;
npy_intp index = locate_key(kwnames, key);
@@ -2297,7 +2297,7 @@ _parse_axes_arg(PyUFuncObject *ufunc, int op_core_num_dims[], PyObject *axes,
* Returns 0 on success, and -1 on failure
*/
static int
-_parse_axis_arg(PyUFuncObject *ufunc, int core_num_dims[], PyObject *axis,
+_parse_axis_arg(PyUFuncObject *ufunc, const int core_num_dims[], PyObject *axis,
PyArrayObject **op, int broadcast_ndim, int **remap_axis) {
int nop = ufunc->nargs;
int iop, axis_int;
@@ -2368,7 +2368,7 @@ _parse_axis_arg(PyUFuncObject *ufunc, int core_num_dims[], PyObject *axis,
*/
static int
_get_coredim_sizes(PyUFuncObject *ufunc, PyArrayObject **op,
- int *op_core_num_dims, npy_uint32 *core_dim_flags,
+ const int *op_core_num_dims, npy_uint32 *core_dim_flags,
npy_intp *core_dim_sizes, int **remap_axis) {
int i;
int nin = ufunc->nin;
@@ -4053,14 +4053,14 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind,
int *op_axes[3] = {op_axes_arrays[0], op_axes_arrays[1],
op_axes_arrays[2]};
npy_uint32 op_flags[3];
- int i, idim, ndim, otype_final;
+ int idim, ndim, otype_final;
int need_outer_iterator = 0;
NpyIter *iter = NULL;
/* The reduceat indices - ind must be validated outside this call */
npy_intp *reduceat_ind;
- npy_intp ind_size, red_axis_size;
+ npy_intp i, ind_size, red_axis_size;
/* The selected inner loop */
PyUFuncGenericFunction innerloop = NULL;
void *innerloopdata = NULL;
@@ -4146,7 +4146,7 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind,
#endif
/* Set up the op_axes for the outer loop */
- for (i = 0, idim = 0; idim < ndim; ++idim) {
+ for (idim = 0; idim < ndim; ++idim) {
/* Use the i-th iteration dimension to match up ind */
if (idim == axis) {
op_axes_arrays[0][idim] = axis;
@@ -4866,7 +4866,7 @@ ufunc_seterr(PyObject *NPY_UNUSED(dummy), PyObject *args)
NPY_NO_EXPORT int
PyUFunc_ReplaceLoopBySignature(PyUFuncObject *func,
PyUFuncGenericFunction newfunc,
- int *signature,
+ const int *signature,
PyUFuncGenericFunction *oldfunc)
{
int i, j;
@@ -4921,7 +4921,7 @@ PyUFunc_FromFuncAndDataAndSignatureAndIdentity(PyUFuncGenericFunction *func, voi
char *types, int ntypes,
int nin, int nout, int identity,
const char *name, const char *doc,
- int unused, const char *signature,
+ const int unused, const char *signature,
PyObject *identity_value)
{
PyUFuncObject *ufunc;
@@ -5223,7 +5223,7 @@ NPY_NO_EXPORT int
PyUFunc_RegisterLoopForType(PyUFuncObject *ufunc,
int usertype,
PyUFuncGenericFunction function,
- int *arg_types,
+ const int *arg_types,
void *data)
{
PyArray_Descr *descr;
diff --git a/numpy/core/src/umath/ufunc_type_resolution.c b/numpy/core/src/umath/ufunc_type_resolution.c
index d837df117..9be7b63a0 100644
--- a/numpy/core/src/umath/ufunc_type_resolution.c
+++ b/numpy/core/src/umath/ufunc_type_resolution.c
@@ -1958,7 +1958,8 @@ linear_search_type_resolver(PyUFuncObject *self,
npy_intp i, j, nin = self->nin, nop = nin + self->nout;
int types[NPY_MAXARGS];
const char *ufunc_name;
- int no_castable_output, use_min_scalar;
+ int no_castable_output = 0;
+ int use_min_scalar;
/* For making a better error message on coercion error */
char err_dst_typecode = '-', err_src_typecode = '-';
diff --git a/numpy/core/tests/data/umath-validation-set-README b/numpy/core/tests/data/umath-validation-set-README
new file mode 100644
index 000000000..6561ca3b5
--- /dev/null
+++ b/numpy/core/tests/data/umath-validation-set-README
@@ -0,0 +1,15 @@
+Steps to validate transcendental functions:
+1) Add a file 'umath-validation-set-<ufuncname>', where ufuncname is name of
+ the function in NumPy you want to validate
+2) The file should contain 4 columns: dtype,input,expected output,ulperror
+ a. dtype: one of np.float16, np.float32, np.float64
+ b. input: floating point input to ufunc in hex. Example: 0x414570a4
+ represents 12.340000152587890625
+ c. expected output: floating point output for the corresponding input in hex.
+ This should be computed using a high(er) precision library and then rounded to
+ same format as the input.
+ d. ulperror: expected maximum ulp error of the function. This
+ should be the same across all rows of the same dtype. Otherwise, the function is
+ tested for the maximum ulp error among all entries of that dtype.
+3) Add file umath-validation-set-<ufuncname> to the test file test_umath_accuracy.py
+ which will then validate your ufunc.
diff --git a/numpy/core/tests/data/umath-validation-set-cos b/numpy/core/tests/data/umath-validation-set-cos
new file mode 100644
index 000000000..360ebcd6a
--- /dev/null
+++ b/numpy/core/tests/data/umath-validation-set-cos
@@ -0,0 +1,707 @@
+dtype,input,output,ulperrortol
+## +ve denormals ##
+np.float32,0x004b4716,0x3f800000,2
+np.float32,0x007b2490,0x3f800000,2
+np.float32,0x007c99fa,0x3f800000,2
+np.float32,0x00734a0c,0x3f800000,2
+np.float32,0x0070de24,0x3f800000,2
+np.float32,0x007fffff,0x3f800000,2
+np.float32,0x00000001,0x3f800000,2
+## -ve denormals ##
+np.float32,0x80495d65,0x3f800000,2
+np.float32,0x806894f6,0x3f800000,2
+np.float32,0x80555a76,0x3f800000,2
+np.float32,0x804e1fb8,0x3f800000,2
+np.float32,0x80687de9,0x3f800000,2
+np.float32,0x807fffff,0x3f800000,2
+np.float32,0x80000001,0x3f800000,2
+## +/-0.0f, +/-FLT_MIN +/-FLT_MAX ##
+np.float32,0x00000000,0x3f800000,2
+np.float32,0x80000000,0x3f800000,2
+np.float32,0x00800000,0x3f800000,2
+np.float32,0x7f7fffff,0x3f5a5f96,2
+np.float32,0x80800000,0x3f800000,2
+np.float32,0xff7fffff,0x3f5a5f96,2
+## 1.00f + 0x00000001 ##
+np.float32,0x3f800000,0x3f0a5140,2
+np.float32,0x3f800001,0x3f0a513f,2
+np.float32,0x3f800002,0x3f0a513d,2
+np.float32,0xc090a8b0,0xbe4332ce,2
+np.float32,0x41ce3184,0x3f4d1de1,2
+np.float32,0xc1d85848,0xbeaa8980,2
+np.float32,0x402b8820,0xbf653aa3,2
+np.float32,0x42b4e454,0xbf4a338b,2
+np.float32,0x42a67a60,0x3c58202e,2
+np.float32,0x41d92388,0xbed987c7,2
+np.float32,0x422dd66c,0x3f5dcab3,2
+np.float32,0xc28f5be6,0xbf5688d8,2
+np.float32,0x41ab2674,0xbf53aa3b,2
+np.float32,0xd0102756,0x3f45d12d,2
+np.float32,0xcf99405e,0xbe9cf281,2
+np.float32,0xcfd83a12,0x3eaae4ca,2
+np.float32,0x4fb54db0,0xbf7b2894,2
+np.float32,0xcfcca29d,0x3f752e4e,2
+np.float32,0xceec2ac0,0xbf745303,2
+np.float32,0xcfdca97f,0x3ef554a7,2
+np.float32,0xcfe92b0a,0x3f4618f2,2
+np.float32,0x5014b0eb,0x3ee933e6,2
+np.float32,0xcfa7ee96,0xbeedeeb2,2
+np.float32,0x754c09a0,0xbef298de,2
+np.float32,0x77a731fb,0x3f24599f,2
+np.float32,0x76de2494,0x3f79576c,2
+np.float32,0xf74920dc,0xbf4d196e,2
+np.float32,0x7707a312,0xbeb5cb8e,2
+np.float32,0x75bf9790,0xbf7fd7fe,2
+np.float32,0xf4ca7c40,0xbe15107d,2
+np.float32,0x77e91899,0xbe8a968b,2
+np.float32,0xf74c9820,0xbf7f9677,2
+np.float32,0x7785ca29,0xbe6ef93b,2
+np.float32,0x3f490fdb,0x3f3504f3,2
+np.float32,0xbf490fdb,0x3f3504f3,2
+np.float32,0x3fc90fdb,0xb33bbd2e,2
+np.float32,0xbfc90fdb,0xb33bbd2e,2
+np.float32,0x40490fdb,0xbf800000,2
+np.float32,0xc0490fdb,0xbf800000,2
+np.float32,0x3fc90fdb,0xb33bbd2e,2
+np.float32,0xbfc90fdb,0xb33bbd2e,2
+np.float32,0x40490fdb,0xbf800000,2
+np.float32,0xc0490fdb,0xbf800000,2
+np.float32,0x40c90fdb,0x3f800000,2
+np.float32,0xc0c90fdb,0x3f800000,2
+np.float32,0x4016cbe4,0xbf3504f3,2
+np.float32,0xc016cbe4,0xbf3504f3,2
+np.float32,0x4096cbe4,0x324cde2e,2
+np.float32,0xc096cbe4,0x324cde2e,2
+np.float32,0x4116cbe4,0xbf800000,2
+np.float32,0xc116cbe4,0xbf800000,2
+np.float32,0x40490fdb,0xbf800000,2
+np.float32,0xc0490fdb,0xbf800000,2
+np.float32,0x40c90fdb,0x3f800000,2
+np.float32,0xc0c90fdb,0x3f800000,2
+np.float32,0x41490fdb,0x3f800000,2
+np.float32,0xc1490fdb,0x3f800000,2
+np.float32,0x407b53d2,0xbf3504f1,2
+np.float32,0xc07b53d2,0xbf3504f1,2
+np.float32,0x40fb53d2,0xb4b5563d,2
+np.float32,0xc0fb53d2,0xb4b5563d,2
+np.float32,0x417b53d2,0xbf800000,2
+np.float32,0xc17b53d2,0xbf800000,2
+np.float32,0x4096cbe4,0x324cde2e,2
+np.float32,0xc096cbe4,0x324cde2e,2
+np.float32,0x4116cbe4,0xbf800000,2
+np.float32,0xc116cbe4,0xbf800000,2
+np.float32,0x4196cbe4,0x3f800000,2
+np.float32,0xc196cbe4,0x3f800000,2
+np.float32,0x40afede0,0x3f3504f7,2
+np.float32,0xc0afede0,0x3f3504f7,2
+np.float32,0x412fede0,0x353222c4,2
+np.float32,0xc12fede0,0x353222c4,2
+np.float32,0x41afede0,0xbf800000,2
+np.float32,0xc1afede0,0xbf800000,2
+np.float32,0x40c90fdb,0x3f800000,2
+np.float32,0xc0c90fdb,0x3f800000,2
+np.float32,0x41490fdb,0x3f800000,2
+np.float32,0xc1490fdb,0x3f800000,2
+np.float32,0x41c90fdb,0x3f800000,2
+np.float32,0xc1c90fdb,0x3f800000,2
+np.float32,0x40e231d6,0x3f3504f3,2
+np.float32,0xc0e231d6,0x3f3504f3,2
+np.float32,0x416231d6,0xb319a6a2,2
+np.float32,0xc16231d6,0xb319a6a2,2
+np.float32,0x41e231d6,0xbf800000,2
+np.float32,0xc1e231d6,0xbf800000,2
+np.float32,0x40fb53d2,0xb4b5563d,2
+np.float32,0xc0fb53d2,0xb4b5563d,2
+np.float32,0x417b53d2,0xbf800000,2
+np.float32,0xc17b53d2,0xbf800000,2
+np.float32,0x41fb53d2,0x3f800000,2
+np.float32,0xc1fb53d2,0x3f800000,2
+np.float32,0x410a3ae7,0xbf3504fb,2
+np.float32,0xc10a3ae7,0xbf3504fb,2
+np.float32,0x418a3ae7,0x35b08908,2
+np.float32,0xc18a3ae7,0x35b08908,2
+np.float32,0x420a3ae7,0xbf800000,2
+np.float32,0xc20a3ae7,0xbf800000,2
+np.float32,0x4116cbe4,0xbf800000,2
+np.float32,0xc116cbe4,0xbf800000,2
+np.float32,0x4196cbe4,0x3f800000,2
+np.float32,0xc196cbe4,0x3f800000,2
+np.float32,0x4216cbe4,0x3f800000,2
+np.float32,0xc216cbe4,0x3f800000,2
+np.float32,0x41235ce2,0xbf3504ef,2
+np.float32,0xc1235ce2,0xbf3504ef,2
+np.float32,0x41a35ce2,0xb53889b6,2
+np.float32,0xc1a35ce2,0xb53889b6,2
+np.float32,0x42235ce2,0xbf800000,2
+np.float32,0xc2235ce2,0xbf800000,2
+np.float32,0x412fede0,0x353222c4,2
+np.float32,0xc12fede0,0x353222c4,2
+np.float32,0x41afede0,0xbf800000,2
+np.float32,0xc1afede0,0xbf800000,2
+np.float32,0x422fede0,0x3f800000,2
+np.float32,0xc22fede0,0x3f800000,2
+np.float32,0x413c7edd,0x3f3504f4,2
+np.float32,0xc13c7edd,0x3f3504f4,2
+np.float32,0x41bc7edd,0x33800add,2
+np.float32,0xc1bc7edd,0x33800add,2
+np.float32,0x423c7edd,0xbf800000,2
+np.float32,0xc23c7edd,0xbf800000,2
+np.float32,0x41490fdb,0x3f800000,2
+np.float32,0xc1490fdb,0x3f800000,2
+np.float32,0x41c90fdb,0x3f800000,2
+np.float32,0xc1c90fdb,0x3f800000,2
+np.float32,0x42490fdb,0x3f800000,2
+np.float32,0xc2490fdb,0x3f800000,2
+np.float32,0x4155a0d9,0x3f3504eb,2
+np.float32,0xc155a0d9,0x3f3504eb,2
+np.float32,0x41d5a0d9,0xb5b3bc81,2
+np.float32,0xc1d5a0d9,0xb5b3bc81,2
+np.float32,0x4255a0d9,0xbf800000,2
+np.float32,0xc255a0d9,0xbf800000,2
+np.float32,0x416231d6,0xb319a6a2,2
+np.float32,0xc16231d6,0xb319a6a2,2
+np.float32,0x41e231d6,0xbf800000,2
+np.float32,0xc1e231d6,0xbf800000,2
+np.float32,0x426231d6,0x3f800000,2
+np.float32,0xc26231d6,0x3f800000,2
+np.float32,0x416ec2d4,0xbf3504f7,2
+np.float32,0xc16ec2d4,0xbf3504f7,2
+np.float32,0x41eec2d4,0x353ef0a7,2
+np.float32,0xc1eec2d4,0x353ef0a7,2
+np.float32,0x426ec2d4,0xbf800000,2
+np.float32,0xc26ec2d4,0xbf800000,2
+np.float32,0x417b53d2,0xbf800000,2
+np.float32,0xc17b53d2,0xbf800000,2
+np.float32,0x41fb53d2,0x3f800000,2
+np.float32,0xc1fb53d2,0x3f800000,2
+np.float32,0x427b53d2,0x3f800000,2
+np.float32,0xc27b53d2,0x3f800000,2
+np.float32,0x4183f268,0xbf3504e7,2
+np.float32,0xc183f268,0xbf3504e7,2
+np.float32,0x4203f268,0xb6059a13,2
+np.float32,0xc203f268,0xb6059a13,2
+np.float32,0x4283f268,0xbf800000,2
+np.float32,0xc283f268,0xbf800000,2
+np.float32,0x418a3ae7,0x35b08908,2
+np.float32,0xc18a3ae7,0x35b08908,2
+np.float32,0x420a3ae7,0xbf800000,2
+np.float32,0xc20a3ae7,0xbf800000,2
+np.float32,0x428a3ae7,0x3f800000,2
+np.float32,0xc28a3ae7,0x3f800000,2
+np.float32,0x41908365,0x3f3504f0,2
+np.float32,0xc1908365,0x3f3504f0,2
+np.float32,0x42108365,0xb512200d,2
+np.float32,0xc2108365,0xb512200d,2
+np.float32,0x42908365,0xbf800000,2
+np.float32,0xc2908365,0xbf800000,2
+np.float32,0x4196cbe4,0x3f800000,2
+np.float32,0xc196cbe4,0x3f800000,2
+np.float32,0x4216cbe4,0x3f800000,2
+np.float32,0xc216cbe4,0x3f800000,2
+np.float32,0x4296cbe4,0x3f800000,2
+np.float32,0xc296cbe4,0x3f800000,2
+np.float32,0x419d1463,0x3f3504ef,2
+np.float32,0xc19d1463,0x3f3504ef,2
+np.float32,0x421d1463,0xb5455799,2
+np.float32,0xc21d1463,0xb5455799,2
+np.float32,0x429d1463,0xbf800000,2
+np.float32,0xc29d1463,0xbf800000,2
+np.float32,0x41a35ce2,0xb53889b6,2
+np.float32,0xc1a35ce2,0xb53889b6,2
+np.float32,0x42235ce2,0xbf800000,2
+np.float32,0xc2235ce2,0xbf800000,2
+np.float32,0x42a35ce2,0x3f800000,2
+np.float32,0xc2a35ce2,0x3f800000,2
+np.float32,0x41a9a561,0xbf3504ff,2
+np.float32,0xc1a9a561,0xbf3504ff,2
+np.float32,0x4229a561,0x360733d0,2
+np.float32,0xc229a561,0x360733d0,2
+np.float32,0x42a9a561,0xbf800000,2
+np.float32,0xc2a9a561,0xbf800000,2
+np.float32,0x41afede0,0xbf800000,2
+np.float32,0xc1afede0,0xbf800000,2
+np.float32,0x422fede0,0x3f800000,2
+np.float32,0xc22fede0,0x3f800000,2
+np.float32,0x42afede0,0x3f800000,2
+np.float32,0xc2afede0,0x3f800000,2
+np.float32,0x41b6365e,0xbf3504f6,2
+np.float32,0xc1b6365e,0xbf3504f6,2
+np.float32,0x4236365e,0x350bb91c,2
+np.float32,0xc236365e,0x350bb91c,2
+np.float32,0x42b6365e,0xbf800000,2
+np.float32,0xc2b6365e,0xbf800000,2
+np.float32,0x41bc7edd,0x33800add,2
+np.float32,0xc1bc7edd,0x33800add,2
+np.float32,0x423c7edd,0xbf800000,2
+np.float32,0xc23c7edd,0xbf800000,2
+np.float32,0x42bc7edd,0x3f800000,2
+np.float32,0xc2bc7edd,0x3f800000,2
+np.float32,0x41c2c75c,0x3f3504f8,2
+np.float32,0xc1c2c75c,0x3f3504f8,2
+np.float32,0x4242c75c,0x354bbe8a,2
+np.float32,0xc242c75c,0x354bbe8a,2
+np.float32,0x42c2c75c,0xbf800000,2
+np.float32,0xc2c2c75c,0xbf800000,2
+np.float32,0x41c90fdb,0x3f800000,2
+np.float32,0xc1c90fdb,0x3f800000,2
+np.float32,0x42490fdb,0x3f800000,2
+np.float32,0xc2490fdb,0x3f800000,2
+np.float32,0x42c90fdb,0x3f800000,2
+np.float32,0xc2c90fdb,0x3f800000,2
+np.float32,0x41cf585a,0x3f3504e7,2
+np.float32,0xc1cf585a,0x3f3504e7,2
+np.float32,0x424f585a,0xb608cd8c,2
+np.float32,0xc24f585a,0xb608cd8c,2
+np.float32,0x42cf585a,0xbf800000,2
+np.float32,0xc2cf585a,0xbf800000,2
+np.float32,0x41d5a0d9,0xb5b3bc81,2
+np.float32,0xc1d5a0d9,0xb5b3bc81,2
+np.float32,0x4255a0d9,0xbf800000,2
+np.float32,0xc255a0d9,0xbf800000,2
+np.float32,0x42d5a0d9,0x3f800000,2
+np.float32,0xc2d5a0d9,0x3f800000,2
+np.float32,0x41dbe958,0xbf350507,2
+np.float32,0xc1dbe958,0xbf350507,2
+np.float32,0x425be958,0x365eab75,2
+np.float32,0xc25be958,0x365eab75,2
+np.float32,0x42dbe958,0xbf800000,2
+np.float32,0xc2dbe958,0xbf800000,2
+np.float32,0x41e231d6,0xbf800000,2
+np.float32,0xc1e231d6,0xbf800000,2
+np.float32,0x426231d6,0x3f800000,2
+np.float32,0xc26231d6,0x3f800000,2
+np.float32,0x42e231d6,0x3f800000,2
+np.float32,0xc2e231d6,0x3f800000,2
+np.float32,0x41e87a55,0xbf3504ef,2
+np.float32,0xc1e87a55,0xbf3504ef,2
+np.float32,0x42687a55,0xb552257b,2
+np.float32,0xc2687a55,0xb552257b,2
+np.float32,0x42e87a55,0xbf800000,2
+np.float32,0xc2e87a55,0xbf800000,2
+np.float32,0x41eec2d4,0x353ef0a7,2
+np.float32,0xc1eec2d4,0x353ef0a7,2
+np.float32,0x426ec2d4,0xbf800000,2
+np.float32,0xc26ec2d4,0xbf800000,2
+np.float32,0x42eec2d4,0x3f800000,2
+np.float32,0xc2eec2d4,0x3f800000,2
+np.float32,0x41f50b53,0x3f3504ff,2
+np.float32,0xc1f50b53,0x3f3504ff,2
+np.float32,0x42750b53,0x360a6748,2
+np.float32,0xc2750b53,0x360a6748,2
+np.float32,0x42f50b53,0xbf800000,2
+np.float32,0xc2f50b53,0xbf800000,2
+np.float32,0x41fb53d2,0x3f800000,2
+np.float32,0xc1fb53d2,0x3f800000,2
+np.float32,0x427b53d2,0x3f800000,2
+np.float32,0xc27b53d2,0x3f800000,2
+np.float32,0x42fb53d2,0x3f800000,2
+np.float32,0xc2fb53d2,0x3f800000,2
+np.float32,0x4200ce28,0x3f3504f6,2
+np.float32,0xc200ce28,0x3f3504f6,2
+np.float32,0x4280ce28,0x34fdd672,2
+np.float32,0xc280ce28,0x34fdd672,2
+np.float32,0x4300ce28,0xbf800000,2
+np.float32,0xc300ce28,0xbf800000,2
+np.float32,0x4203f268,0xb6059a13,2
+np.float32,0xc203f268,0xb6059a13,2
+np.float32,0x4283f268,0xbf800000,2
+np.float32,0xc283f268,0xbf800000,2
+np.float32,0x4303f268,0x3f800000,2
+np.float32,0xc303f268,0x3f800000,2
+np.float32,0x420716a7,0xbf3504f8,2
+np.float32,0xc20716a7,0xbf3504f8,2
+np.float32,0x428716a7,0x35588c6d,2
+np.float32,0xc28716a7,0x35588c6d,2
+np.float32,0x430716a7,0xbf800000,2
+np.float32,0xc30716a7,0xbf800000,2
+np.float32,0x420a3ae7,0xbf800000,2
+np.float32,0xc20a3ae7,0xbf800000,2
+np.float32,0x428a3ae7,0x3f800000,2
+np.float32,0xc28a3ae7,0x3f800000,2
+np.float32,0x430a3ae7,0x3f800000,2
+np.float32,0xc30a3ae7,0x3f800000,2
+np.float32,0x420d5f26,0xbf3504e7,2
+np.float32,0xc20d5f26,0xbf3504e7,2
+np.float32,0x428d5f26,0xb60c0105,2
+np.float32,0xc28d5f26,0xb60c0105,2
+np.float32,0x430d5f26,0xbf800000,2
+np.float32,0xc30d5f26,0xbf800000,2
+np.float32,0x42108365,0xb512200d,2
+np.float32,0xc2108365,0xb512200d,2
+np.float32,0x42908365,0xbf800000,2
+np.float32,0xc2908365,0xbf800000,2
+np.float32,0x43108365,0x3f800000,2
+np.float32,0xc3108365,0x3f800000,2
+np.float32,0x4213a7a5,0x3f350507,2
+np.float32,0xc213a7a5,0x3f350507,2
+np.float32,0x4293a7a5,0x3661deee,2
+np.float32,0xc293a7a5,0x3661deee,2
+np.float32,0x4313a7a5,0xbf800000,2
+np.float32,0xc313a7a5,0xbf800000,2
+np.float32,0x4216cbe4,0x3f800000,2
+np.float32,0xc216cbe4,0x3f800000,2
+np.float32,0x4296cbe4,0x3f800000,2
+np.float32,0xc296cbe4,0x3f800000,2
+np.float32,0x4316cbe4,0x3f800000,2
+np.float32,0xc316cbe4,0x3f800000,2
+np.float32,0x4219f024,0x3f3504d8,2
+np.float32,0xc219f024,0x3f3504d8,2
+np.float32,0x4299f024,0xb69bde6c,2
+np.float32,0xc299f024,0xb69bde6c,2
+np.float32,0x4319f024,0xbf800000,2
+np.float32,0xc319f024,0xbf800000,2
+np.float32,0x421d1463,0xb5455799,2
+np.float32,0xc21d1463,0xb5455799,2
+np.float32,0x429d1463,0xbf800000,2
+np.float32,0xc29d1463,0xbf800000,2
+np.float32,0x431d1463,0x3f800000,2
+np.float32,0xc31d1463,0x3f800000,2
+np.float32,0x422038a3,0xbf350516,2
+np.float32,0xc22038a3,0xbf350516,2
+np.float32,0x42a038a3,0x36c6cd61,2
+np.float32,0xc2a038a3,0x36c6cd61,2
+np.float32,0x432038a3,0xbf800000,2
+np.float32,0xc32038a3,0xbf800000,2
+np.float32,0x42235ce2,0xbf800000,2
+np.float32,0xc2235ce2,0xbf800000,2
+np.float32,0x42a35ce2,0x3f800000,2
+np.float32,0xc2a35ce2,0x3f800000,2
+np.float32,0x43235ce2,0x3f800000,2
+np.float32,0xc3235ce2,0x3f800000,2
+np.float32,0x42268121,0xbf3504f6,2
+np.float32,0xc2268121,0xbf3504f6,2
+np.float32,0x42a68121,0x34e43aac,2
+np.float32,0xc2a68121,0x34e43aac,2
+np.float32,0x43268121,0xbf800000,2
+np.float32,0xc3268121,0xbf800000,2
+np.float32,0x4229a561,0x360733d0,2
+np.float32,0xc229a561,0x360733d0,2
+np.float32,0x42a9a561,0xbf800000,2
+np.float32,0xc2a9a561,0xbf800000,2
+np.float32,0x4329a561,0x3f800000,2
+np.float32,0xc329a561,0x3f800000,2
+np.float32,0x422cc9a0,0x3f3504f8,2
+np.float32,0xc22cc9a0,0x3f3504f8,2
+np.float32,0x42acc9a0,0x35655a50,2
+np.float32,0xc2acc9a0,0x35655a50,2
+np.float32,0x432cc9a0,0xbf800000,2
+np.float32,0xc32cc9a0,0xbf800000,2
+np.float32,0x422fede0,0x3f800000,2
+np.float32,0xc22fede0,0x3f800000,2
+np.float32,0x42afede0,0x3f800000,2
+np.float32,0xc2afede0,0x3f800000,2
+np.float32,0x432fede0,0x3f800000,2
+np.float32,0xc32fede0,0x3f800000,2
+np.float32,0x4233121f,0x3f3504e7,2
+np.float32,0xc233121f,0x3f3504e7,2
+np.float32,0x42b3121f,0xb60f347d,2
+np.float32,0xc2b3121f,0xb60f347d,2
+np.float32,0x4333121f,0xbf800000,2
+np.float32,0xc333121f,0xbf800000,2
+np.float32,0x4236365e,0x350bb91c,2
+np.float32,0xc236365e,0x350bb91c,2
+np.float32,0x42b6365e,0xbf800000,2
+np.float32,0xc2b6365e,0xbf800000,2
+np.float32,0x4336365e,0x3f800000,2
+np.float32,0xc336365e,0x3f800000,2
+np.float32,0x42395a9e,0xbf350507,2
+np.float32,0xc2395a9e,0xbf350507,2
+np.float32,0x42b95a9e,0x36651267,2
+np.float32,0xc2b95a9e,0x36651267,2
+np.float32,0x43395a9e,0xbf800000,2
+np.float32,0xc3395a9e,0xbf800000,2
+np.float32,0x423c7edd,0xbf800000,2
+np.float32,0xc23c7edd,0xbf800000,2
+np.float32,0x42bc7edd,0x3f800000,2
+np.float32,0xc2bc7edd,0x3f800000,2
+np.float32,0x433c7edd,0x3f800000,2
+np.float32,0xc33c7edd,0x3f800000,2
+np.float32,0x423fa31d,0xbf3504d7,2
+np.float32,0xc23fa31d,0xbf3504d7,2
+np.float32,0x42bfa31d,0xb69d7828,2
+np.float32,0xc2bfa31d,0xb69d7828,2
+np.float32,0x433fa31d,0xbf800000,2
+np.float32,0xc33fa31d,0xbf800000,2
+np.float32,0x4242c75c,0x354bbe8a,2
+np.float32,0xc242c75c,0x354bbe8a,2
+np.float32,0x42c2c75c,0xbf800000,2
+np.float32,0xc2c2c75c,0xbf800000,2
+np.float32,0x4342c75c,0x3f800000,2
+np.float32,0xc342c75c,0x3f800000,2
+np.float32,0x4245eb9c,0x3f350517,2
+np.float32,0xc245eb9c,0x3f350517,2
+np.float32,0x42c5eb9c,0x36c8671d,2
+np.float32,0xc2c5eb9c,0x36c8671d,2
+np.float32,0x4345eb9c,0xbf800000,2
+np.float32,0xc345eb9c,0xbf800000,2
+np.float32,0x42490fdb,0x3f800000,2
+np.float32,0xc2490fdb,0x3f800000,2
+np.float32,0x42c90fdb,0x3f800000,2
+np.float32,0xc2c90fdb,0x3f800000,2
+np.float32,0x43490fdb,0x3f800000,2
+np.float32,0xc3490fdb,0x3f800000,2
+np.float32,0x424c341a,0x3f3504f5,2
+np.float32,0xc24c341a,0x3f3504f5,2
+np.float32,0x42cc341a,0x34ca9ee6,2
+np.float32,0xc2cc341a,0x34ca9ee6,2
+np.float32,0x434c341a,0xbf800000,2
+np.float32,0xc34c341a,0xbf800000,2
+np.float32,0x424f585a,0xb608cd8c,2
+np.float32,0xc24f585a,0xb608cd8c,2
+np.float32,0x42cf585a,0xbf800000,2
+np.float32,0xc2cf585a,0xbf800000,2
+np.float32,0x434f585a,0x3f800000,2
+np.float32,0xc34f585a,0x3f800000,2
+np.float32,0x42527c99,0xbf3504f9,2
+np.float32,0xc2527c99,0xbf3504f9,2
+np.float32,0x42d27c99,0x35722833,2
+np.float32,0xc2d27c99,0x35722833,2
+np.float32,0x43527c99,0xbf800000,2
+np.float32,0xc3527c99,0xbf800000,2
+np.float32,0x4255a0d9,0xbf800000,2
+np.float32,0xc255a0d9,0xbf800000,2
+np.float32,0x42d5a0d9,0x3f800000,2
+np.float32,0xc2d5a0d9,0x3f800000,2
+np.float32,0x4355a0d9,0x3f800000,2
+np.float32,0xc355a0d9,0x3f800000,2
+np.float32,0x4258c518,0xbf3504e6,2
+np.float32,0xc258c518,0xbf3504e6,2
+np.float32,0x42d8c518,0xb61267f6,2
+np.float32,0xc2d8c518,0xb61267f6,2
+np.float32,0x4358c518,0xbf800000,2
+np.float32,0xc358c518,0xbf800000,2
+np.float32,0x425be958,0x365eab75,2
+np.float32,0xc25be958,0x365eab75,2
+np.float32,0x42dbe958,0xbf800000,2
+np.float32,0xc2dbe958,0xbf800000,2
+np.float32,0x435be958,0x3f800000,2
+np.float32,0xc35be958,0x3f800000,2
+np.float32,0x425f0d97,0x3f350508,2
+np.float32,0xc25f0d97,0x3f350508,2
+np.float32,0x42df0d97,0x366845e0,2
+np.float32,0xc2df0d97,0x366845e0,2
+np.float32,0x435f0d97,0xbf800000,2
+np.float32,0xc35f0d97,0xbf800000,2
+np.float32,0x426231d6,0x3f800000,2
+np.float32,0xc26231d6,0x3f800000,2
+np.float32,0x42e231d6,0x3f800000,2
+np.float32,0xc2e231d6,0x3f800000,2
+np.float32,0x436231d6,0x3f800000,2
+np.float32,0xc36231d6,0x3f800000,2
+np.float32,0x42655616,0x3f3504d7,2
+np.float32,0xc2655616,0x3f3504d7,2
+np.float32,0x42e55616,0xb69f11e5,2
+np.float32,0xc2e55616,0xb69f11e5,2
+np.float32,0x43655616,0xbf800000,2
+np.float32,0xc3655616,0xbf800000,2
+np.float32,0x42687a55,0xb552257b,2
+np.float32,0xc2687a55,0xb552257b,2
+np.float32,0x42e87a55,0xbf800000,2
+np.float32,0xc2e87a55,0xbf800000,2
+np.float32,0x43687a55,0x3f800000,2
+np.float32,0xc3687a55,0x3f800000,2
+np.float32,0x426b9e95,0xbf350517,2
+np.float32,0xc26b9e95,0xbf350517,2
+np.float32,0x42eb9e95,0x36ca00d9,2
+np.float32,0xc2eb9e95,0x36ca00d9,2
+np.float32,0x436b9e95,0xbf800000,2
+np.float32,0xc36b9e95,0xbf800000,2
+np.float32,0x426ec2d4,0xbf800000,2
+np.float32,0xc26ec2d4,0xbf800000,2
+np.float32,0x42eec2d4,0x3f800000,2
+np.float32,0xc2eec2d4,0x3f800000,2
+np.float32,0x436ec2d4,0x3f800000,2
+np.float32,0xc36ec2d4,0x3f800000,2
+np.float32,0x4271e713,0xbf3504f5,2
+np.float32,0xc271e713,0xbf3504f5,2
+np.float32,0x42f1e713,0x34b10321,2
+np.float32,0xc2f1e713,0x34b10321,2
+np.float32,0x4371e713,0xbf800000,2
+np.float32,0xc371e713,0xbf800000,2
+np.float32,0x42750b53,0x360a6748,2
+np.float32,0xc2750b53,0x360a6748,2
+np.float32,0x42f50b53,0xbf800000,2
+np.float32,0xc2f50b53,0xbf800000,2
+np.float32,0x43750b53,0x3f800000,2
+np.float32,0xc3750b53,0x3f800000,2
+np.float32,0x42782f92,0x3f3504f9,2
+np.float32,0xc2782f92,0x3f3504f9,2
+np.float32,0x42f82f92,0x357ef616,2
+np.float32,0xc2f82f92,0x357ef616,2
+np.float32,0x43782f92,0xbf800000,2
+np.float32,0xc3782f92,0xbf800000,2
+np.float32,0x427b53d2,0x3f800000,2
+np.float32,0xc27b53d2,0x3f800000,2
+np.float32,0x42fb53d2,0x3f800000,2
+np.float32,0xc2fb53d2,0x3f800000,2
+np.float32,0x437b53d2,0x3f800000,2
+np.float32,0xc37b53d2,0x3f800000,2
+np.float32,0x427e7811,0x3f3504e6,2
+np.float32,0xc27e7811,0x3f3504e6,2
+np.float32,0x42fe7811,0xb6159b6f,2
+np.float32,0xc2fe7811,0xb6159b6f,2
+np.float32,0x437e7811,0xbf800000,2
+np.float32,0xc37e7811,0xbf800000,2
+np.float32,0x4280ce28,0x34fdd672,2
+np.float32,0xc280ce28,0x34fdd672,2
+np.float32,0x4300ce28,0xbf800000,2
+np.float32,0xc300ce28,0xbf800000,2
+np.float32,0x4380ce28,0x3f800000,2
+np.float32,0xc380ce28,0x3f800000,2
+np.float32,0x42826048,0xbf350508,2
+np.float32,0xc2826048,0xbf350508,2
+np.float32,0x43026048,0x366b7958,2
+np.float32,0xc3026048,0x366b7958,2
+np.float32,0x43826048,0xbf800000,2
+np.float32,0xc3826048,0xbf800000,2
+np.float32,0x4283f268,0xbf800000,2
+np.float32,0xc283f268,0xbf800000,2
+np.float32,0x4303f268,0x3f800000,2
+np.float32,0xc303f268,0x3f800000,2
+np.float32,0x4383f268,0x3f800000,2
+np.float32,0xc383f268,0x3f800000,2
+np.float32,0x42858487,0xbf350504,2
+np.float32,0xc2858487,0xbf350504,2
+np.float32,0x43058487,0x363ea8be,2
+np.float32,0xc3058487,0x363ea8be,2
+np.float32,0x43858487,0xbf800000,2
+np.float32,0xc3858487,0xbf800000,2
+np.float32,0x428716a7,0x35588c6d,2
+np.float32,0xc28716a7,0x35588c6d,2
+np.float32,0x430716a7,0xbf800000,2
+np.float32,0xc30716a7,0xbf800000,2
+np.float32,0x438716a7,0x3f800000,2
+np.float32,0xc38716a7,0x3f800000,2
+np.float32,0x4288a8c7,0x3f350517,2
+np.float32,0xc288a8c7,0x3f350517,2
+np.float32,0x4308a8c7,0x36cb9a96,2
+np.float32,0xc308a8c7,0x36cb9a96,2
+np.float32,0x4388a8c7,0xbf800000,2
+np.float32,0xc388a8c7,0xbf800000,2
+np.float32,0x428a3ae7,0x3f800000,2
+np.float32,0xc28a3ae7,0x3f800000,2
+np.float32,0x430a3ae7,0x3f800000,2
+np.float32,0xc30a3ae7,0x3f800000,2
+np.float32,0x438a3ae7,0x3f800000,2
+np.float32,0xc38a3ae7,0x3f800000,2
+np.float32,0x428bcd06,0x3f3504f5,2
+np.float32,0xc28bcd06,0x3f3504f5,2
+np.float32,0x430bcd06,0x3497675b,2
+np.float32,0xc30bcd06,0x3497675b,2
+np.float32,0x438bcd06,0xbf800000,2
+np.float32,0xc38bcd06,0xbf800000,2
+np.float32,0x428d5f26,0xb60c0105,2
+np.float32,0xc28d5f26,0xb60c0105,2
+np.float32,0x430d5f26,0xbf800000,2
+np.float32,0xc30d5f26,0xbf800000,2
+np.float32,0x438d5f26,0x3f800000,2
+np.float32,0xc38d5f26,0x3f800000,2
+np.float32,0x428ef146,0xbf350526,2
+np.float32,0xc28ef146,0xbf350526,2
+np.float32,0x430ef146,0x3710bc40,2
+np.float32,0xc30ef146,0x3710bc40,2
+np.float32,0x438ef146,0xbf800000,2
+np.float32,0xc38ef146,0xbf800000,2
+np.float32,0x42908365,0xbf800000,2
+np.float32,0xc2908365,0xbf800000,2
+np.float32,0x43108365,0x3f800000,2
+np.float32,0xc3108365,0x3f800000,2
+np.float32,0x43908365,0x3f800000,2
+np.float32,0xc3908365,0x3f800000,2
+np.float32,0x42921585,0xbf3504e6,2
+np.float32,0xc2921585,0xbf3504e6,2
+np.float32,0x43121585,0xb618cee8,2
+np.float32,0xc3121585,0xb618cee8,2
+np.float32,0x43921585,0xbf800000,2
+np.float32,0xc3921585,0xbf800000,2
+np.float32,0x4293a7a5,0x3661deee,2
+np.float32,0xc293a7a5,0x3661deee,2
+np.float32,0x4313a7a5,0xbf800000,2
+np.float32,0xc313a7a5,0xbf800000,2
+np.float32,0x4393a7a5,0x3f800000,2
+np.float32,0xc393a7a5,0x3f800000,2
+np.float32,0x429539c5,0x3f350536,2
+np.float32,0xc29539c5,0x3f350536,2
+np.float32,0x431539c5,0x373bab34,2
+np.float32,0xc31539c5,0x373bab34,2
+np.float32,0x439539c5,0xbf800000,2
+np.float32,0xc39539c5,0xbf800000,2
+np.float32,0x4296cbe4,0x3f800000,2
+np.float32,0xc296cbe4,0x3f800000,2
+np.float32,0x4316cbe4,0x3f800000,2
+np.float32,0xc316cbe4,0x3f800000,2
+np.float32,0x4396cbe4,0x3f800000,2
+np.float32,0xc396cbe4,0x3f800000,2
+np.float32,0x42985e04,0x3f3504d7,2
+np.float32,0xc2985e04,0x3f3504d7,2
+np.float32,0x43185e04,0xb6a2455d,2
+np.float32,0xc3185e04,0xb6a2455d,2
+np.float32,0x43985e04,0xbf800000,2
+np.float32,0xc3985e04,0xbf800000,2
+np.float32,0x4299f024,0xb69bde6c,2
+np.float32,0xc299f024,0xb69bde6c,2
+np.float32,0x4319f024,0xbf800000,2
+np.float32,0xc319f024,0xbf800000,2
+np.float32,0x4399f024,0x3f800000,2
+np.float32,0xc399f024,0x3f800000,2
+np.float32,0x429b8243,0xbf3504ea,2
+np.float32,0xc29b8243,0xbf3504ea,2
+np.float32,0x431b8243,0xb5cb2eb8,2
+np.float32,0xc31b8243,0xb5cb2eb8,2
+np.float32,0x439b8243,0xbf800000,2
+np.float32,0xc39b8243,0xbf800000,2
+np.float32,0x435b2047,0x3f3504c1,2
+np.float32,0x42a038a2,0xb5e4ca7e,2
+np.float32,0x432038a2,0xbf800000,2
+np.float32,0x4345eb9b,0xbf800000,2
+np.float32,0x42c5eb9b,0xb5de638c,2
+np.float32,0x42eb9e94,0xb5d7fc9b,2
+np.float32,0x4350ea79,0x3631dadb,2
+np.float32,0x42dbe957,0xbf800000,2
+np.float32,0x425be957,0xb505522a,2
+np.float32,0x435be957,0x3f800000,2
+np.float32,0x487fe5ab,0xba140185,2
+np.float32,0x497fe5ab,0x3f7fffd5,2
+np.float32,0x49ffe5ab,0x3f7fff55,2
+np.float32,0x49ffeb37,0x3b9382f5,2
+np.float32,0x497ff0c3,0x3b13049f,2
+np.float32,0x49fff0c3,0xbf7fff57,2
+np.float32,0x49fff64f,0xbb928618,2
+np.float32,0x497ffbdb,0xbf7fffd6,2
+np.float32,0x49fffbdb,0x3f7fff59,2
+np.float32,0x48fffbdb,0xba9207c6,2
+np.float32,0x4e736e56,0xbf800000,2
+np.float32,0x4d4da377,0xbf800000,2
+np.float32,0x4ece58c3,0xbf800000,2
+np.float32,0x4ee0db9c,0xbf800000,2
+np.float32,0x4dee7002,0x3f800000,2
+np.float32,0x4ee86afc,0x38857a23,2
+np.float32,0x4dca4f3f,0xbf800000,2
+np.float32,0x4ecb48af,0xb95d1e10,2
+np.float32,0x4e51e33f,0xbf800000,2
+np.float32,0x4ef5f421,0xbf800000,2
+np.float32,0x46027eb2,0x3e7d94c9,2
+np.float32,0x4477baed,0xbe7f1824,2
+np.float32,0x454b8024,0x3e7f5268,2
+np.float32,0x455d2c09,0x3e7f40cb,2
+np.float32,0x4768d3de,0xba14b4af,2
+np.float32,0x46c1e7cd,0x3e7fb102,2
+np.float32,0x44a52949,0xbe7dc9d5,2
+np.float32,0x4454633a,0x3e7dbc7d,2
+np.float32,0x4689810b,0x3e7eb02b,2
+np.float32,0x473473cd,0xbe7eef6f,2
+np.float32,0x44a5193f,0x3e7e1b1f,2
+np.float32,0x46004b36,0x3e7dac59,2
+np.float32,0x467f604b,0x3d7ffd3a,2
+np.float32,0x45ea1805,0x3dffd2e0,2
+np.float32,0x457b6af3,0x3dff7831,2
+np.float32,0x44996159,0xbe7d85f4,2
+np.float32,0x47883553,0xbb80584e,2
+np.float32,0x44e19f0c,0xbdffcfe6,2
+np.float32,0x472b3bf6,0xbe7f7a82,2
+np.float32,0x4600bb4e,0x3a135e33,2
+np.float32,0x449f4556,0x3e7e42e5,2
+np.float32,0x474e9420,0x3dff77b2,2
+np.float32,0x45cbdb23,0x3dff7240,2
+np.float32,0x44222747,0x3dffb039,2
+np.float32,0x4772e419,0xbdff74b8,2
diff --git a/numpy/core/tests/data/umath-validation-set-exp b/numpy/core/tests/data/umath-validation-set-exp
new file mode 100644
index 000000000..1b2cc9ce4
--- /dev/null
+++ b/numpy/core/tests/data/umath-validation-set-exp
@@ -0,0 +1,135 @@
+dtype,input,output,ulperrortol
+## +ve denormals ##
+np.float32,0x004b4716,0x3f800000,3
+np.float32,0x007b2490,0x3f800000,3
+np.float32,0x007c99fa,0x3f800000,3
+np.float32,0x00734a0c,0x3f800000,3
+np.float32,0x0070de24,0x3f800000,3
+np.float32,0x00495d65,0x3f800000,3
+np.float32,0x006894f6,0x3f800000,3
+np.float32,0x00555a76,0x3f800000,3
+np.float32,0x004e1fb8,0x3f800000,3
+np.float32,0x00687de9,0x3f800000,3
+## -ve denormals ##
+np.float32,0x805b59af,0x3f800000,3
+np.float32,0x807ed8ed,0x3f800000,3
+np.float32,0x807142ad,0x3f800000,3
+np.float32,0x80772002,0x3f800000,3
+np.float32,0x8062abcb,0x3f800000,3
+np.float32,0x8045e31c,0x3f800000,3
+np.float32,0x805f01c2,0x3f800000,3
+np.float32,0x80506432,0x3f800000,3
+np.float32,0x8060089d,0x3f800000,3
+np.float32,0x8071292f,0x3f800000,3
+## floats that output a denormal ##
+np.float32,0xc2cf3fc1,0x00000001,3
+np.float32,0xc2c79726,0x00000021,3
+np.float32,0xc2cb295d,0x00000005,3
+np.float32,0xc2b49e6b,0x00068c4c,3
+np.float32,0xc2ca8116,0x00000008,3
+np.float32,0xc2c23f82,0x000001d7,3
+np.float32,0xc2cb69c0,0x00000005,3
+np.float32,0xc2cc1f4d,0x00000003,3
+np.float32,0xc2ae094e,0x00affc4c,3
+np.float32,0xc2c86c44,0x00000015,3
+## random floats between -87.0f and 88.0f ##
+np.float32,0x4030d7e0,0x417d9a05,3
+np.float32,0x426f60e8,0x6aa1be2c,3
+np.float32,0x41a1b220,0x4e0efc11,3
+np.float32,0xc20cc722,0x26159da7,3
+np.float32,0x41c492bc,0x512ec79d,3
+np.float32,0x40980210,0x42e73a0e,3
+np.float32,0xbf1f7b80,0x3f094de3,3
+np.float32,0x42a678a4,0x7b87a383,3
+np.float32,0xc20f3cfd,0x25a1c304,3
+np.float32,0x423ff34c,0x6216467f,3
+np.float32,0x00000000,0x3f800000,3
+## floats that cause an overflow ##
+np.float32,0x7f06d8c1,0x7f800000,3
+np.float32,0x7f451912,0x7f800000,3
+np.float32,0x7ecceac3,0x7f800000,3
+np.float32,0x7f643b45,0x7f800000,3
+np.float32,0x7e910ea0,0x7f800000,3
+np.float32,0x7eb4756b,0x7f800000,3
+np.float32,0x7f4ec708,0x7f800000,3
+np.float32,0x7f6b4551,0x7f800000,3
+np.float32,0x7d8edbda,0x7f800000,3
+np.float32,0x7f730718,0x7f800000,3
+np.float32,0x42b17217,0x7f7fff84,3
+np.float32,0x42b17218,0x7f800000,3
+np.float32,0x42b17219,0x7f800000,3
+np.float32,0xfef2b0bc,0x00000000,3
+np.float32,0xff69f83e,0x00000000,3
+np.float32,0xff4ecb12,0x00000000,3
+np.float32,0xfeac6d86,0x00000000,3
+np.float32,0xfde0cdb8,0x00000000,3
+np.float32,0xff26aef4,0x00000000,3
+np.float32,0xff6f9277,0x00000000,3
+np.float32,0xff7adfc4,0x00000000,3
+np.float32,0xff0ad40e,0x00000000,3
+np.float32,0xff6fd8f3,0x00000000,3
+np.float32,0xc2cff1b4,0x00000001,3
+np.float32,0xc2cff1b5,0x00000000,3
+np.float32,0xc2cff1b6,0x00000000,3
+np.float32,0x7f800000,0x7f800000,3
+np.float32,0xff800000,0x00000000,3
+np.float32,0x4292f27c,0x7480000a,3
+np.float32,0x42a920be,0x7c7fff94,3
+np.float32,0x41c214c9,0x50ffffd9,3
+np.float32,0x41abe686,0x4effffd9,3
+np.float32,0x4287db5a,0x707fffd3,3
+np.float32,0x41902cbb,0x4c800078,3
+np.float32,0x42609466,0x67ffffeb,3
+np.float32,0x41a65af5,0x4e7fffd1,3
+np.float32,0x417f13ff,0x4affffc9,3
+np.float32,0x426d0e6c,0x6a3504f2,3
+np.float32,0x41bc8934,0x507fff51,3
+np.float32,0x42a7bdde,0x7c0000d6,3
+np.float32,0x4120cf66,0x46b504f6,3
+np.float32,0x4244da8f,0x62ffff1a,3
+np.float32,0x41a0cf69,0x4e000034,3
+np.float32,0x41cd2bec,0x52000005,3
+np.float32,0x42893e41,0x7100009e,3
+np.float32,0x41b437e1,0x4fb50502,3
+np.float32,0x41d8430f,0x5300001d,3
+np.float32,0x4244da92,0x62ffffda,3
+np.float32,0x41a0cf63,0x4dffffa9,3
+np.float32,0x3eb17218,0x3fb504f3,3
+np.float32,0x428729e8,0x703504dc,3
+np.float32,0x41a0cf67,0x4e000014,3
+np.float32,0x4252b77d,0x65800011,3
+np.float32,0x41902cb9,0x4c800058,3
+np.float32,0x42a0cf67,0x79800052,3
+np.float32,0x4152b77b,0x48ffffe9,3
+np.float32,0x41265af3,0x46ffffc8,3
+np.float32,0x42187e0b,0x5affff9a,3
+np.float32,0xc0d2b77c,0x3ab504f6,3
+np.float32,0xc283b2ac,0x10000072,3
+np.float32,0xc1cff1b4,0x2cb504f5,3
+np.float32,0xc05dce9e,0x3d000000,3
+np.float32,0xc28ec9d2,0x0bfffea5,3
+np.float32,0xc23c893a,0x1d7fffde,3
+np.float32,0xc2a920c0,0x027fff6c,3
+np.float32,0xc1f9886f,0x2900002b,3
+np.float32,0xc2c42920,0x000000b5,3
+np.float32,0xc2893e41,0x0dfffec5,3
+np.float32,0xc2c4da93,0x00000080,3
+np.float32,0xc17f1401,0x3400000c,3
+np.float32,0xc1902cb6,0x327fffaf,3
+np.float32,0xc27c4e3b,0x11ffffc5,3
+np.float32,0xc268e5c5,0x157ffe9d,3
+np.float32,0xc2b4e953,0x0005a826,3
+np.float32,0xc287db5a,0x0e800016,3
+np.float32,0xc207db5a,0x2700000b,3
+np.float32,0xc2b2d4fe,0x000ffff1,3
+np.float32,0xc268e5c0,0x157fffdd,3
+np.float32,0xc22920bd,0x2100003b,3
+np.float32,0xc2902caf,0x0b80011e,3
+np.float32,0xc1902cba,0x327fff2f,3
+np.float32,0xc2ca6625,0x00000008,3
+np.float32,0xc280ece8,0x10fffeb5,3
+np.float32,0xc2918f94,0x0b0000ea,3
+np.float32,0xc29b43d5,0x077ffffc,3
+np.float32,0xc1e61ff7,0x2ab504f5,3
+np.float32,0xc2867878,0x0effff15,3
+np.float32,0xc2a2324a,0x04fffff4,3
diff --git a/numpy/core/tests/data/umath-validation-set-log b/numpy/core/tests/data/umath-validation-set-log
new file mode 100644
index 000000000..a7bd98481
--- /dev/null
+++ b/numpy/core/tests/data/umath-validation-set-log
@@ -0,0 +1,118 @@
+dtype,input,output,ulperrortol
+## +ve denormals ##
+np.float32,0x004b4716,0xc2afbc1b,4
+np.float32,0x007b2490,0xc2aec01e,4
+np.float32,0x007c99fa,0xc2aeba17,4
+np.float32,0x00734a0c,0xc2aee1dc,4
+np.float32,0x0070de24,0xc2aeecba,4
+np.float32,0x007fffff,0xc2aeac50,4
+np.float32,0x00000001,0xc2ce8ed0,4
+## -ve denormals ##
+np.float32,0x80495d65,0xffc00000,4
+np.float32,0x806894f6,0xffc00000,4
+np.float32,0x80555a76,0xffc00000,4
+np.float32,0x804e1fb8,0xffc00000,4
+np.float32,0x80687de9,0xffc00000,4
+np.float32,0x807fffff,0xffc00000,4
+np.float32,0x80000001,0xffc00000,4
+## +/-0.0f, +/-FLT_MIN +/-FLT_MAX ##
+np.float32,0x00000000,0xff800000,4
+np.float32,0x80000000,0xff800000,4
+np.float32,0x7f7fffff,0x42b17218,4
+np.float32,0x80800000,0xffc00000,4
+np.float32,0xff7fffff,0xffc00000,4
+## 1.00f + 0x00000001 ##
+np.float32,0x3f800000,0x00000000,4
+np.float32,0x3f800001,0x33ffffff,4
+np.float32,0x3f800002,0x347ffffe,4
+np.float32,0x3f7fffff,0xb3800000,4
+np.float32,0x3f7ffffe,0xb4000000,4
+np.float32,0x3f7ffffd,0xb4400001,4
+np.float32,0x402df853,0x3f7ffffe,4
+np.float32,0x402df854,0x3f7fffff,4
+np.float32,0x402df855,0x3f800000,4
+np.float32,0x402df856,0x3f800001,4
+np.float32,0x3ebc5ab0,0xbf800001,4
+np.float32,0x3ebc5ab1,0xbf800000,4
+np.float32,0x3ebc5ab2,0xbf800000,4
+np.float32,0x3ebc5ab3,0xbf7ffffe,4
+np.float32,0x423ef575,0x407768ab,4
+np.float32,0x427b8c61,0x408485dd,4
+np.float32,0x4211e9ee,0x406630b0,4
+np.float32,0x424d5c41,0x407c0fed,4
+np.float32,0x42be722a,0x4091cc91,4
+np.float32,0x42b73d30,0x4090908b,4
+np.float32,0x427e48e2,0x4084de7f,4
+np.float32,0x428f759b,0x4088bba3,4
+np.float32,0x41629069,0x4029a0cc,4
+np.float32,0x4272c99d,0x40836379,4
+np.float32,0x4d1b7458,0x4197463d,4
+np.float32,0x4f10c594,0x41ace2b2,4
+np.float32,0x4ea397c2,0x41a85171,4
+np.float32,0x4fefa9d1,0x41b6769c,4
+np.float32,0x4ebac6ab,0x41a960dc,4
+np.float32,0x4f6efb42,0x41b0e535,4
+np.float32,0x4e9ab8e7,0x41a7df44,4
+np.float32,0x4e81b5d1,0x41a67625,4
+np.float32,0x5014d9f2,0x41b832bd,4
+np.float32,0x4f02175c,0x41ac07b8,4
+np.float32,0x7f034f89,0x42b01c47,4
+np.float32,0x7f56d00e,0x42b11849,4
+np.float32,0x7f1cd5f6,0x42b0773a,4
+np.float32,0x7e979174,0x42af02d7,4
+np.float32,0x7f23369f,0x42b08ba2,4
+np.float32,0x7f0637ae,0x42b0277d,4
+np.float32,0x7efcb6e8,0x42b00897,4
+np.float32,0x7f7907c8,0x42b163f6,4
+np.float32,0x7e95c4c2,0x42aefcba,4
+np.float32,0x7f4577b2,0x42b0ed2d,4
+np.float32,0x3f49c92e,0xbe73ae84,4
+np.float32,0x3f4a23d1,0xbe71e2f8,4
+np.float32,0x3f4abb67,0xbe6ee430,4
+np.float32,0x3f48169a,0xbe7c5532,4
+np.float32,0x3f47f5fa,0xbe7cfc37,4
+np.float32,0x3f488309,0xbe7a2ad8,4
+np.float32,0x3f479df4,0xbe7ebf5f,4
+np.float32,0x3f47cfff,0xbe7dbec9,4
+np.float32,0x3f496704,0xbe75a125,4
+np.float32,0x3f478ee8,0xbe7f0c92,4
+np.float32,0x3f4a763b,0xbe7041ce,4
+np.float32,0x3f47a108,0xbe7eaf94,4
+np.float32,0x3f48136c,0xbe7c6578,4
+np.float32,0x3f481c17,0xbe7c391c,4
+np.float32,0x3f47cd28,0xbe7dcd56,4
+np.float32,0x3f478be8,0xbe7f1bf7,4
+np.float32,0x3f4c1f8e,0xbe67e367,4
+np.float32,0x3f489b0c,0xbe79b03f,4
+np.float32,0x3f4934cf,0xbe76a08a,4
+np.float32,0x3f4954df,0xbe75fd6a,4
+np.float32,0x3f47a3f5,0xbe7ea093,4
+np.float32,0x3f4ba4fc,0xbe6a4b02,4
+np.float32,0x3f47a0e1,0xbe7eb05c,4
+np.float32,0x3f48c30a,0xbe78e42f,4
+np.float32,0x3f48cab8,0xbe78bd05,4
+np.float32,0x3f4b0569,0xbe6d6ea4,4
+np.float32,0x3f47de32,0xbe7d7607,4
+np.float32,0x3f477328,0xbe7f9b00,4
+np.float32,0x3f496dab,0xbe757f52,4
+np.float32,0x3f47662c,0xbe7fddac,4
+np.float32,0x3f48ddd8,0xbe785b80,4
+np.float32,0x3f481866,0xbe7c4bff,4
+np.float32,0x3f48b119,0xbe793fb6,4
+np.float32,0x3f48c7e8,0xbe78cb5c,4
+np.float32,0x3f4985f6,0xbe7503da,4
+np.float32,0x3f483fdf,0xbe7b8212,4
+np.float32,0x3f4b1c76,0xbe6cfa67,4
+np.float32,0x3f480b2e,0xbe7c8fa8,4
+np.float32,0x3f48745f,0xbe7a75bf,4
+np.float32,0x3f485bda,0xbe7af308,4
+np.float32,0x3f47a660,0xbe7e942c,4
+np.float32,0x3f47d4d5,0xbe7da600,4
+np.float32,0x3f4b0a26,0xbe6d56be,4
+np.float32,0x3f4a4883,0xbe712924,4
+np.float32,0x3f4769e7,0xbe7fca84,4
+np.float32,0x3f499702,0xbe74ad3f,4
+np.float32,0x3f494ab1,0xbe763131,4
+np.float32,0x3f476b69,0xbe7fc2c6,4
+np.float32,0x3f4884e8,0xbe7a214a,4
+np.float32,0x3f486945,0xbe7aae76,4
diff --git a/numpy/core/tests/data/umath-validation-set-sin b/numpy/core/tests/data/umath-validation-set-sin
new file mode 100644
index 000000000..a56273195
--- /dev/null
+++ b/numpy/core/tests/data/umath-validation-set-sin
@@ -0,0 +1,707 @@
+dtype,input,output,ulperrortol
+## +ve denormals ##
+np.float32,0x004b4716,0x004b4716,2
+np.float32,0x007b2490,0x007b2490,2
+np.float32,0x007c99fa,0x007c99fa,2
+np.float32,0x00734a0c,0x00734a0c,2
+np.float32,0x0070de24,0x0070de24,2
+np.float32,0x007fffff,0x007fffff,2
+np.float32,0x00000001,0x00000001,2
+## -ve denormals ##
+np.float32,0x80495d65,0x80495d65,2
+np.float32,0x806894f6,0x806894f6,2
+np.float32,0x80555a76,0x80555a76,2
+np.float32,0x804e1fb8,0x804e1fb8,2
+np.float32,0x80687de9,0x80687de9,2
+np.float32,0x807fffff,0x807fffff,2
+np.float32,0x80000001,0x80000001,2
+## +/-0.0f, +/-FLT_MIN +/-FLT_MAX ##
+np.float32,0x00000000,0x00000000,2
+np.float32,0x80000000,0x80000000,2
+np.float32,0x00800000,0x00800000,2
+np.float32,0x7f7fffff,0xbf0599b3,2
+np.float32,0x80800000,0x80800000,2
+np.float32,0xff7fffff,0x3f0599b3,2
+## 1.00f ##
+np.float32,0x3f800000,0x3f576aa4,2
+np.float32,0x3f800001,0x3f576aa6,2
+np.float32,0x3f800002,0x3f576aa7,2
+np.float32,0xc090a8b0,0x3f7b4e48,2
+np.float32,0x41ce3184,0x3f192d43,2
+np.float32,0xc1d85848,0xbf7161cb,2
+np.float32,0x402b8820,0x3ee3f29f,2
+np.float32,0x42b4e454,0x3f1d0151,2
+np.float32,0x42a67a60,0x3f7ffa4c,2
+np.float32,0x41d92388,0x3f67beef,2
+np.float32,0x422dd66c,0xbeffb0c1,2
+np.float32,0xc28f5be6,0xbf0bae79,2
+np.float32,0x41ab2674,0x3f0ffe2b,2
+np.float32,0xd0102756,0x3f227e8a,2
+np.float32,0xcf99405e,0x3f73ad00,2
+np.float32,0xcfd83a12,0xbf7151a7,2
+np.float32,0x4fb54db0,0xbe46354b,2
+np.float32,0xcfcca29d,0xbe9345e6,2
+np.float32,0xceec2ac0,0x3e98dc89,2
+np.float32,0xcfdca97f,0xbf60b2b4,2
+np.float32,0xcfe92b0a,0xbf222705,2
+np.float32,0x5014b0eb,0x3f63e75c,2
+np.float32,0xcfa7ee96,0x3f62ada4,2
+np.float32,0x754c09a0,0xbf617056,2
+np.float32,0x77a731fb,0x3f44472b,2
+np.float32,0x76de2494,0xbe680739,2
+np.float32,0xf74920dc,0xbf193338,2
+np.float32,0x7707a312,0xbf6f51b1,2
+np.float32,0x75bf9790,0xbd0f1a47,2
+np.float32,0xf4ca7c40,0xbf7d45e7,2
+np.float32,0x77e91899,0x3f767181,2
+np.float32,0xf74c9820,0xbd685b75,2
+np.float32,0x7785ca29,0x3f78ee61,2
+np.float32,0x3f490fdb,0x3f3504f3,2
+np.float32,0xbf490fdb,0xbf3504f3,2
+np.float32,0x3fc90fdb,0x3f800000,2
+np.float32,0xbfc90fdb,0xbf800000,2
+np.float32,0x40490fdb,0xb3bbbd2e,2
+np.float32,0xc0490fdb,0x33bbbd2e,2
+np.float32,0x3fc90fdb,0x3f800000,2
+np.float32,0xbfc90fdb,0xbf800000,2
+np.float32,0x40490fdb,0xb3bbbd2e,2
+np.float32,0xc0490fdb,0x33bbbd2e,2
+np.float32,0x40c90fdb,0x343bbd2e,2
+np.float32,0xc0c90fdb,0xb43bbd2e,2
+np.float32,0x4016cbe4,0x3f3504f3,2
+np.float32,0xc016cbe4,0xbf3504f3,2
+np.float32,0x4096cbe4,0xbf800000,2
+np.float32,0xc096cbe4,0x3f800000,2
+np.float32,0x4116cbe4,0xb2ccde2e,2
+np.float32,0xc116cbe4,0x32ccde2e,2
+np.float32,0x40490fdb,0xb3bbbd2e,2
+np.float32,0xc0490fdb,0x33bbbd2e,2
+np.float32,0x40c90fdb,0x343bbd2e,2
+np.float32,0xc0c90fdb,0xb43bbd2e,2
+np.float32,0x41490fdb,0x34bbbd2e,2
+np.float32,0xc1490fdb,0xb4bbbd2e,2
+np.float32,0x407b53d2,0xbf3504f5,2
+np.float32,0xc07b53d2,0x3f3504f5,2
+np.float32,0x40fb53d2,0x3f800000,2
+np.float32,0xc0fb53d2,0xbf800000,2
+np.float32,0x417b53d2,0xb535563d,2
+np.float32,0xc17b53d2,0x3535563d,2
+np.float32,0x4096cbe4,0xbf800000,2
+np.float32,0xc096cbe4,0x3f800000,2
+np.float32,0x4116cbe4,0xb2ccde2e,2
+np.float32,0xc116cbe4,0x32ccde2e,2
+np.float32,0x4196cbe4,0x334cde2e,2
+np.float32,0xc196cbe4,0xb34cde2e,2
+np.float32,0x40afede0,0xbf3504ef,2
+np.float32,0xc0afede0,0x3f3504ef,2
+np.float32,0x412fede0,0xbf800000,2
+np.float32,0xc12fede0,0x3f800000,2
+np.float32,0x41afede0,0xb5b222c4,2
+np.float32,0xc1afede0,0x35b222c4,2
+np.float32,0x40c90fdb,0x343bbd2e,2
+np.float32,0xc0c90fdb,0xb43bbd2e,2
+np.float32,0x41490fdb,0x34bbbd2e,2
+np.float32,0xc1490fdb,0xb4bbbd2e,2
+np.float32,0x41c90fdb,0x353bbd2e,2
+np.float32,0xc1c90fdb,0xb53bbd2e,2
+np.float32,0x40e231d6,0x3f3504f3,2
+np.float32,0xc0e231d6,0xbf3504f3,2
+np.float32,0x416231d6,0x3f800000,2
+np.float32,0xc16231d6,0xbf800000,2
+np.float32,0x41e231d6,0xb399a6a2,2
+np.float32,0xc1e231d6,0x3399a6a2,2
+np.float32,0x40fb53d2,0x3f800000,2
+np.float32,0xc0fb53d2,0xbf800000,2
+np.float32,0x417b53d2,0xb535563d,2
+np.float32,0xc17b53d2,0x3535563d,2
+np.float32,0x41fb53d2,0x35b5563d,2
+np.float32,0xc1fb53d2,0xb5b5563d,2
+np.float32,0x410a3ae7,0x3f3504eb,2
+np.float32,0xc10a3ae7,0xbf3504eb,2
+np.float32,0x418a3ae7,0xbf800000,2
+np.float32,0xc18a3ae7,0x3f800000,2
+np.float32,0x420a3ae7,0xb6308908,2
+np.float32,0xc20a3ae7,0x36308908,2
+np.float32,0x4116cbe4,0xb2ccde2e,2
+np.float32,0xc116cbe4,0x32ccde2e,2
+np.float32,0x4196cbe4,0x334cde2e,2
+np.float32,0xc196cbe4,0xb34cde2e,2
+np.float32,0x4216cbe4,0x33ccde2e,2
+np.float32,0xc216cbe4,0xb3ccde2e,2
+np.float32,0x41235ce2,0xbf3504f7,2
+np.float32,0xc1235ce2,0x3f3504f7,2
+np.float32,0x41a35ce2,0x3f800000,2
+np.float32,0xc1a35ce2,0xbf800000,2
+np.float32,0x42235ce2,0xb5b889b6,2
+np.float32,0xc2235ce2,0x35b889b6,2
+np.float32,0x412fede0,0xbf800000,2
+np.float32,0xc12fede0,0x3f800000,2
+np.float32,0x41afede0,0xb5b222c4,2
+np.float32,0xc1afede0,0x35b222c4,2
+np.float32,0x422fede0,0x363222c4,2
+np.float32,0xc22fede0,0xb63222c4,2
+np.float32,0x413c7edd,0xbf3504f3,2
+np.float32,0xc13c7edd,0x3f3504f3,2
+np.float32,0x41bc7edd,0xbf800000,2
+np.float32,0xc1bc7edd,0x3f800000,2
+np.float32,0x423c7edd,0xb4000add,2
+np.float32,0xc23c7edd,0x34000add,2
+np.float32,0x41490fdb,0x34bbbd2e,2
+np.float32,0xc1490fdb,0xb4bbbd2e,2
+np.float32,0x41c90fdb,0x353bbd2e,2
+np.float32,0xc1c90fdb,0xb53bbd2e,2
+np.float32,0x42490fdb,0x35bbbd2e,2
+np.float32,0xc2490fdb,0xb5bbbd2e,2
+np.float32,0x4155a0d9,0x3f3504fb,2
+np.float32,0xc155a0d9,0xbf3504fb,2
+np.float32,0x41d5a0d9,0x3f800000,2
+np.float32,0xc1d5a0d9,0xbf800000,2
+np.float32,0x4255a0d9,0xb633bc81,2
+np.float32,0xc255a0d9,0x3633bc81,2
+np.float32,0x416231d6,0x3f800000,2
+np.float32,0xc16231d6,0xbf800000,2
+np.float32,0x41e231d6,0xb399a6a2,2
+np.float32,0xc1e231d6,0x3399a6a2,2
+np.float32,0x426231d6,0x3419a6a2,2
+np.float32,0xc26231d6,0xb419a6a2,2
+np.float32,0x416ec2d4,0x3f3504ef,2
+np.float32,0xc16ec2d4,0xbf3504ef,2
+np.float32,0x41eec2d4,0xbf800000,2
+np.float32,0xc1eec2d4,0x3f800000,2
+np.float32,0x426ec2d4,0xb5bef0a7,2
+np.float32,0xc26ec2d4,0x35bef0a7,2
+np.float32,0x417b53d2,0xb535563d,2
+np.float32,0xc17b53d2,0x3535563d,2
+np.float32,0x41fb53d2,0x35b5563d,2
+np.float32,0xc1fb53d2,0xb5b5563d,2
+np.float32,0x427b53d2,0x3635563d,2
+np.float32,0xc27b53d2,0xb635563d,2
+np.float32,0x4183f268,0xbf3504ff,2
+np.float32,0xc183f268,0x3f3504ff,2
+np.float32,0x4203f268,0x3f800000,2
+np.float32,0xc203f268,0xbf800000,2
+np.float32,0x4283f268,0xb6859a13,2
+np.float32,0xc283f268,0x36859a13,2
+np.float32,0x418a3ae7,0xbf800000,2
+np.float32,0xc18a3ae7,0x3f800000,2
+np.float32,0x420a3ae7,0xb6308908,2
+np.float32,0xc20a3ae7,0x36308908,2
+np.float32,0x428a3ae7,0x36b08908,2
+np.float32,0xc28a3ae7,0xb6b08908,2
+np.float32,0x41908365,0xbf3504f6,2
+np.float32,0xc1908365,0x3f3504f6,2
+np.float32,0x42108365,0xbf800000,2
+np.float32,0xc2108365,0x3f800000,2
+np.float32,0x42908365,0x3592200d,2
+np.float32,0xc2908365,0xb592200d,2
+np.float32,0x4196cbe4,0x334cde2e,2
+np.float32,0xc196cbe4,0xb34cde2e,2
+np.float32,0x4216cbe4,0x33ccde2e,2
+np.float32,0xc216cbe4,0xb3ccde2e,2
+np.float32,0x4296cbe4,0x344cde2e,2
+np.float32,0xc296cbe4,0xb44cde2e,2
+np.float32,0x419d1463,0x3f3504f8,2
+np.float32,0xc19d1463,0xbf3504f8,2
+np.float32,0x421d1463,0x3f800000,2
+np.float32,0xc21d1463,0xbf800000,2
+np.float32,0x429d1463,0xb5c55799,2
+np.float32,0xc29d1463,0x35c55799,2
+np.float32,0x41a35ce2,0x3f800000,2
+np.float32,0xc1a35ce2,0xbf800000,2
+np.float32,0x42235ce2,0xb5b889b6,2
+np.float32,0xc2235ce2,0x35b889b6,2
+np.float32,0x42a35ce2,0x363889b6,2
+np.float32,0xc2a35ce2,0xb63889b6,2
+np.float32,0x41a9a561,0x3f3504e7,2
+np.float32,0xc1a9a561,0xbf3504e7,2
+np.float32,0x4229a561,0xbf800000,2
+np.float32,0xc229a561,0x3f800000,2
+np.float32,0x42a9a561,0xb68733d0,2
+np.float32,0xc2a9a561,0x368733d0,2
+np.float32,0x41afede0,0xb5b222c4,2
+np.float32,0xc1afede0,0x35b222c4,2
+np.float32,0x422fede0,0x363222c4,2
+np.float32,0xc22fede0,0xb63222c4,2
+np.float32,0x42afede0,0x36b222c4,2
+np.float32,0xc2afede0,0xb6b222c4,2
+np.float32,0x41b6365e,0xbf3504f0,2
+np.float32,0xc1b6365e,0x3f3504f0,2
+np.float32,0x4236365e,0x3f800000,2
+np.float32,0xc236365e,0xbf800000,2
+np.float32,0x42b6365e,0x358bb91c,2
+np.float32,0xc2b6365e,0xb58bb91c,2
+np.float32,0x41bc7edd,0xbf800000,2
+np.float32,0xc1bc7edd,0x3f800000,2
+np.float32,0x423c7edd,0xb4000add,2
+np.float32,0xc23c7edd,0x34000add,2
+np.float32,0x42bc7edd,0x34800add,2
+np.float32,0xc2bc7edd,0xb4800add,2
+np.float32,0x41c2c75c,0xbf3504ef,2
+np.float32,0xc1c2c75c,0x3f3504ef,2
+np.float32,0x4242c75c,0xbf800000,2
+np.float32,0xc242c75c,0x3f800000,2
+np.float32,0x42c2c75c,0xb5cbbe8a,2
+np.float32,0xc2c2c75c,0x35cbbe8a,2
+np.float32,0x41c90fdb,0x353bbd2e,2
+np.float32,0xc1c90fdb,0xb53bbd2e,2
+np.float32,0x42490fdb,0x35bbbd2e,2
+np.float32,0xc2490fdb,0xb5bbbd2e,2
+np.float32,0x42c90fdb,0x363bbd2e,2
+np.float32,0xc2c90fdb,0xb63bbd2e,2
+np.float32,0x41cf585a,0x3f3504ff,2
+np.float32,0xc1cf585a,0xbf3504ff,2
+np.float32,0x424f585a,0x3f800000,2
+np.float32,0xc24f585a,0xbf800000,2
+np.float32,0x42cf585a,0xb688cd8c,2
+np.float32,0xc2cf585a,0x3688cd8c,2
+np.float32,0x41d5a0d9,0x3f800000,2
+np.float32,0xc1d5a0d9,0xbf800000,2
+np.float32,0x4255a0d9,0xb633bc81,2
+np.float32,0xc255a0d9,0x3633bc81,2
+np.float32,0x42d5a0d9,0x36b3bc81,2
+np.float32,0xc2d5a0d9,0xb6b3bc81,2
+np.float32,0x41dbe958,0x3f3504e0,2
+np.float32,0xc1dbe958,0xbf3504e0,2
+np.float32,0x425be958,0xbf800000,2
+np.float32,0xc25be958,0x3f800000,2
+np.float32,0x42dbe958,0xb6deab75,2
+np.float32,0xc2dbe958,0x36deab75,2
+np.float32,0x41e231d6,0xb399a6a2,2
+np.float32,0xc1e231d6,0x3399a6a2,2
+np.float32,0x426231d6,0x3419a6a2,2
+np.float32,0xc26231d6,0xb419a6a2,2
+np.float32,0x42e231d6,0x3499a6a2,2
+np.float32,0xc2e231d6,0xb499a6a2,2
+np.float32,0x41e87a55,0xbf3504f8,2
+np.float32,0xc1e87a55,0x3f3504f8,2
+np.float32,0x42687a55,0x3f800000,2
+np.float32,0xc2687a55,0xbf800000,2
+np.float32,0x42e87a55,0xb5d2257b,2
+np.float32,0xc2e87a55,0x35d2257b,2
+np.float32,0x41eec2d4,0xbf800000,2
+np.float32,0xc1eec2d4,0x3f800000,2
+np.float32,0x426ec2d4,0xb5bef0a7,2
+np.float32,0xc26ec2d4,0x35bef0a7,2
+np.float32,0x42eec2d4,0x363ef0a7,2
+np.float32,0xc2eec2d4,0xb63ef0a7,2
+np.float32,0x41f50b53,0xbf3504e7,2
+np.float32,0xc1f50b53,0x3f3504e7,2
+np.float32,0x42750b53,0xbf800000,2
+np.float32,0xc2750b53,0x3f800000,2
+np.float32,0x42f50b53,0xb68a6748,2
+np.float32,0xc2f50b53,0x368a6748,2
+np.float32,0x41fb53d2,0x35b5563d,2
+np.float32,0xc1fb53d2,0xb5b5563d,2
+np.float32,0x427b53d2,0x3635563d,2
+np.float32,0xc27b53d2,0xb635563d,2
+np.float32,0x42fb53d2,0x36b5563d,2
+np.float32,0xc2fb53d2,0xb6b5563d,2
+np.float32,0x4200ce28,0x3f3504f0,2
+np.float32,0xc200ce28,0xbf3504f0,2
+np.float32,0x4280ce28,0x3f800000,2
+np.float32,0xc280ce28,0xbf800000,2
+np.float32,0x4300ce28,0x357dd672,2
+np.float32,0xc300ce28,0xb57dd672,2
+np.float32,0x4203f268,0x3f800000,2
+np.float32,0xc203f268,0xbf800000,2
+np.float32,0x4283f268,0xb6859a13,2
+np.float32,0xc283f268,0x36859a13,2
+np.float32,0x4303f268,0x37059a13,2
+np.float32,0xc303f268,0xb7059a13,2
+np.float32,0x420716a7,0x3f3504ee,2
+np.float32,0xc20716a7,0xbf3504ee,2
+np.float32,0x428716a7,0xbf800000,2
+np.float32,0xc28716a7,0x3f800000,2
+np.float32,0x430716a7,0xb5d88c6d,2
+np.float32,0xc30716a7,0x35d88c6d,2
+np.float32,0x420a3ae7,0xb6308908,2
+np.float32,0xc20a3ae7,0x36308908,2
+np.float32,0x428a3ae7,0x36b08908,2
+np.float32,0xc28a3ae7,0xb6b08908,2
+np.float32,0x430a3ae7,0x37308908,2
+np.float32,0xc30a3ae7,0xb7308908,2
+np.float32,0x420d5f26,0xbf350500,2
+np.float32,0xc20d5f26,0x3f350500,2
+np.float32,0x428d5f26,0x3f800000,2
+np.float32,0xc28d5f26,0xbf800000,2
+np.float32,0x430d5f26,0xb68c0105,2
+np.float32,0xc30d5f26,0x368c0105,2
+np.float32,0x42108365,0xbf800000,2
+np.float32,0xc2108365,0x3f800000,2
+np.float32,0x42908365,0x3592200d,2
+np.float32,0xc2908365,0xb592200d,2
+np.float32,0x43108365,0xb612200d,2
+np.float32,0xc3108365,0x3612200d,2
+np.float32,0x4213a7a5,0xbf3504df,2
+np.float32,0xc213a7a5,0x3f3504df,2
+np.float32,0x4293a7a5,0xbf800000,2
+np.float32,0xc293a7a5,0x3f800000,2
+np.float32,0x4313a7a5,0xb6e1deee,2
+np.float32,0xc313a7a5,0x36e1deee,2
+np.float32,0x4216cbe4,0x33ccde2e,2
+np.float32,0xc216cbe4,0xb3ccde2e,2
+np.float32,0x4296cbe4,0x344cde2e,2
+np.float32,0xc296cbe4,0xb44cde2e,2
+np.float32,0x4316cbe4,0x34ccde2e,2
+np.float32,0xc316cbe4,0xb4ccde2e,2
+np.float32,0x4219f024,0x3f35050f,2
+np.float32,0xc219f024,0xbf35050f,2
+np.float32,0x4299f024,0x3f800000,2
+np.float32,0xc299f024,0xbf800000,2
+np.float32,0x4319f024,0xb71bde6c,2
+np.float32,0xc319f024,0x371bde6c,2
+np.float32,0x421d1463,0x3f800000,2
+np.float32,0xc21d1463,0xbf800000,2
+np.float32,0x429d1463,0xb5c55799,2
+np.float32,0xc29d1463,0x35c55799,2
+np.float32,0x431d1463,0x36455799,2
+np.float32,0xc31d1463,0xb6455799,2
+np.float32,0x422038a3,0x3f3504d0,2
+np.float32,0xc22038a3,0xbf3504d0,2
+np.float32,0x42a038a3,0xbf800000,2
+np.float32,0xc2a038a3,0x3f800000,2
+np.float32,0x432038a3,0xb746cd61,2
+np.float32,0xc32038a3,0x3746cd61,2
+np.float32,0x42235ce2,0xb5b889b6,2
+np.float32,0xc2235ce2,0x35b889b6,2
+np.float32,0x42a35ce2,0x363889b6,2
+np.float32,0xc2a35ce2,0xb63889b6,2
+np.float32,0x43235ce2,0x36b889b6,2
+np.float32,0xc3235ce2,0xb6b889b6,2
+np.float32,0x42268121,0xbf3504f1,2
+np.float32,0xc2268121,0x3f3504f1,2
+np.float32,0x42a68121,0x3f800000,2
+np.float32,0xc2a68121,0xbf800000,2
+np.float32,0x43268121,0x35643aac,2
+np.float32,0xc3268121,0xb5643aac,2
+np.float32,0x4229a561,0xbf800000,2
+np.float32,0xc229a561,0x3f800000,2
+np.float32,0x42a9a561,0xb68733d0,2
+np.float32,0xc2a9a561,0x368733d0,2
+np.float32,0x4329a561,0x370733d0,2
+np.float32,0xc329a561,0xb70733d0,2
+np.float32,0x422cc9a0,0xbf3504ee,2
+np.float32,0xc22cc9a0,0x3f3504ee,2
+np.float32,0x42acc9a0,0xbf800000,2
+np.float32,0xc2acc9a0,0x3f800000,2
+np.float32,0x432cc9a0,0xb5e55a50,2
+np.float32,0xc32cc9a0,0x35e55a50,2
+np.float32,0x422fede0,0x363222c4,2
+np.float32,0xc22fede0,0xb63222c4,2
+np.float32,0x42afede0,0x36b222c4,2
+np.float32,0xc2afede0,0xb6b222c4,2
+np.float32,0x432fede0,0x373222c4,2
+np.float32,0xc32fede0,0xb73222c4,2
+np.float32,0x4233121f,0x3f350500,2
+np.float32,0xc233121f,0xbf350500,2
+np.float32,0x42b3121f,0x3f800000,2
+np.float32,0xc2b3121f,0xbf800000,2
+np.float32,0x4333121f,0xb68f347d,2
+np.float32,0xc333121f,0x368f347d,2
+np.float32,0x4236365e,0x3f800000,2
+np.float32,0xc236365e,0xbf800000,2
+np.float32,0x42b6365e,0x358bb91c,2
+np.float32,0xc2b6365e,0xb58bb91c,2
+np.float32,0x4336365e,0xb60bb91c,2
+np.float32,0xc336365e,0x360bb91c,2
+np.float32,0x42395a9e,0x3f3504df,2
+np.float32,0xc2395a9e,0xbf3504df,2
+np.float32,0x42b95a9e,0xbf800000,2
+np.float32,0xc2b95a9e,0x3f800000,2
+np.float32,0x43395a9e,0xb6e51267,2
+np.float32,0xc3395a9e,0x36e51267,2
+np.float32,0x423c7edd,0xb4000add,2
+np.float32,0xc23c7edd,0x34000add,2
+np.float32,0x42bc7edd,0x34800add,2
+np.float32,0xc2bc7edd,0xb4800add,2
+np.float32,0x433c7edd,0x35000add,2
+np.float32,0xc33c7edd,0xb5000add,2
+np.float32,0x423fa31d,0xbf35050f,2
+np.float32,0xc23fa31d,0x3f35050f,2
+np.float32,0x42bfa31d,0x3f800000,2
+np.float32,0xc2bfa31d,0xbf800000,2
+np.float32,0x433fa31d,0xb71d7828,2
+np.float32,0xc33fa31d,0x371d7828,2
+np.float32,0x4242c75c,0xbf800000,2
+np.float32,0xc242c75c,0x3f800000,2
+np.float32,0x42c2c75c,0xb5cbbe8a,2
+np.float32,0xc2c2c75c,0x35cbbe8a,2
+np.float32,0x4342c75c,0x364bbe8a,2
+np.float32,0xc342c75c,0xb64bbe8a,2
+np.float32,0x4245eb9c,0xbf3504d0,2
+np.float32,0xc245eb9c,0x3f3504d0,2
+np.float32,0x42c5eb9c,0xbf800000,2
+np.float32,0xc2c5eb9c,0x3f800000,2
+np.float32,0x4345eb9c,0xb748671d,2
+np.float32,0xc345eb9c,0x3748671d,2
+np.float32,0x42490fdb,0x35bbbd2e,2
+np.float32,0xc2490fdb,0xb5bbbd2e,2
+np.float32,0x42c90fdb,0x363bbd2e,2
+np.float32,0xc2c90fdb,0xb63bbd2e,2
+np.float32,0x43490fdb,0x36bbbd2e,2
+np.float32,0xc3490fdb,0xb6bbbd2e,2
+np.float32,0x424c341a,0x3f3504f1,2
+np.float32,0xc24c341a,0xbf3504f1,2
+np.float32,0x42cc341a,0x3f800000,2
+np.float32,0xc2cc341a,0xbf800000,2
+np.float32,0x434c341a,0x354a9ee6,2
+np.float32,0xc34c341a,0xb54a9ee6,2
+np.float32,0x424f585a,0x3f800000,2
+np.float32,0xc24f585a,0xbf800000,2
+np.float32,0x42cf585a,0xb688cd8c,2
+np.float32,0xc2cf585a,0x3688cd8c,2
+np.float32,0x434f585a,0x3708cd8c,2
+np.float32,0xc34f585a,0xb708cd8c,2
+np.float32,0x42527c99,0x3f3504ee,2
+np.float32,0xc2527c99,0xbf3504ee,2
+np.float32,0x42d27c99,0xbf800000,2
+np.float32,0xc2d27c99,0x3f800000,2
+np.float32,0x43527c99,0xb5f22833,2
+np.float32,0xc3527c99,0x35f22833,2
+np.float32,0x4255a0d9,0xb633bc81,2
+np.float32,0xc255a0d9,0x3633bc81,2
+np.float32,0x42d5a0d9,0x36b3bc81,2
+np.float32,0xc2d5a0d9,0xb6b3bc81,2
+np.float32,0x4355a0d9,0x3733bc81,2
+np.float32,0xc355a0d9,0xb733bc81,2
+np.float32,0x4258c518,0xbf350500,2
+np.float32,0xc258c518,0x3f350500,2
+np.float32,0x42d8c518,0x3f800000,2
+np.float32,0xc2d8c518,0xbf800000,2
+np.float32,0x4358c518,0xb69267f6,2
+np.float32,0xc358c518,0x369267f6,2
+np.float32,0x425be958,0xbf800000,2
+np.float32,0xc25be958,0x3f800000,2
+np.float32,0x42dbe958,0xb6deab75,2
+np.float32,0xc2dbe958,0x36deab75,2
+np.float32,0x435be958,0x375eab75,2
+np.float32,0xc35be958,0xb75eab75,2
+np.float32,0x425f0d97,0xbf3504df,2
+np.float32,0xc25f0d97,0x3f3504df,2
+np.float32,0x42df0d97,0xbf800000,2
+np.float32,0xc2df0d97,0x3f800000,2
+np.float32,0x435f0d97,0xb6e845e0,2
+np.float32,0xc35f0d97,0x36e845e0,2
+np.float32,0x426231d6,0x3419a6a2,2
+np.float32,0xc26231d6,0xb419a6a2,2
+np.float32,0x42e231d6,0x3499a6a2,2
+np.float32,0xc2e231d6,0xb499a6a2,2
+np.float32,0x436231d6,0x3519a6a2,2
+np.float32,0xc36231d6,0xb519a6a2,2
+np.float32,0x42655616,0x3f35050f,2
+np.float32,0xc2655616,0xbf35050f,2
+np.float32,0x42e55616,0x3f800000,2
+np.float32,0xc2e55616,0xbf800000,2
+np.float32,0x43655616,0xb71f11e5,2
+np.float32,0xc3655616,0x371f11e5,2
+np.float32,0x42687a55,0x3f800000,2
+np.float32,0xc2687a55,0xbf800000,2
+np.float32,0x42e87a55,0xb5d2257b,2
+np.float32,0xc2e87a55,0x35d2257b,2
+np.float32,0x43687a55,0x3652257b,2
+np.float32,0xc3687a55,0xb652257b,2
+np.float32,0x426b9e95,0x3f3504cf,2
+np.float32,0xc26b9e95,0xbf3504cf,2
+np.float32,0x42eb9e95,0xbf800000,2
+np.float32,0xc2eb9e95,0x3f800000,2
+np.float32,0x436b9e95,0xb74a00d9,2
+np.float32,0xc36b9e95,0x374a00d9,2
+np.float32,0x426ec2d4,0xb5bef0a7,2
+np.float32,0xc26ec2d4,0x35bef0a7,2
+np.float32,0x42eec2d4,0x363ef0a7,2
+np.float32,0xc2eec2d4,0xb63ef0a7,2
+np.float32,0x436ec2d4,0x36bef0a7,2
+np.float32,0xc36ec2d4,0xb6bef0a7,2
+np.float32,0x4271e713,0xbf3504f1,2
+np.float32,0xc271e713,0x3f3504f1,2
+np.float32,0x42f1e713,0x3f800000,2
+np.float32,0xc2f1e713,0xbf800000,2
+np.float32,0x4371e713,0x35310321,2
+np.float32,0xc371e713,0xb5310321,2
+np.float32,0x42750b53,0xbf800000,2
+np.float32,0xc2750b53,0x3f800000,2
+np.float32,0x42f50b53,0xb68a6748,2
+np.float32,0xc2f50b53,0x368a6748,2
+np.float32,0x43750b53,0x370a6748,2
+np.float32,0xc3750b53,0xb70a6748,2
+np.float32,0x42782f92,0xbf3504ee,2
+np.float32,0xc2782f92,0x3f3504ee,2
+np.float32,0x42f82f92,0xbf800000,2
+np.float32,0xc2f82f92,0x3f800000,2
+np.float32,0x43782f92,0xb5fef616,2
+np.float32,0xc3782f92,0x35fef616,2
+np.float32,0x427b53d2,0x3635563d,2
+np.float32,0xc27b53d2,0xb635563d,2
+np.float32,0x42fb53d2,0x36b5563d,2
+np.float32,0xc2fb53d2,0xb6b5563d,2
+np.float32,0x437b53d2,0x3735563d,2
+np.float32,0xc37b53d2,0xb735563d,2
+np.float32,0x427e7811,0x3f350500,2
+np.float32,0xc27e7811,0xbf350500,2
+np.float32,0x42fe7811,0x3f800000,2
+np.float32,0xc2fe7811,0xbf800000,2
+np.float32,0x437e7811,0xb6959b6f,2
+np.float32,0xc37e7811,0x36959b6f,2
+np.float32,0x4280ce28,0x3f800000,2
+np.float32,0xc280ce28,0xbf800000,2
+np.float32,0x4300ce28,0x357dd672,2
+np.float32,0xc300ce28,0xb57dd672,2
+np.float32,0x4380ce28,0xb5fdd672,2
+np.float32,0xc380ce28,0x35fdd672,2
+np.float32,0x42826048,0x3f3504de,2
+np.float32,0xc2826048,0xbf3504de,2
+np.float32,0x43026048,0xbf800000,2
+np.float32,0xc3026048,0x3f800000,2
+np.float32,0x43826048,0xb6eb7958,2
+np.float32,0xc3826048,0x36eb7958,2
+np.float32,0x4283f268,0xb6859a13,2
+np.float32,0xc283f268,0x36859a13,2
+np.float32,0x4303f268,0x37059a13,2
+np.float32,0xc303f268,0xb7059a13,2
+np.float32,0x4383f268,0x37859a13,2
+np.float32,0xc383f268,0xb7859a13,2
+np.float32,0x42858487,0xbf3504e2,2
+np.float32,0xc2858487,0x3f3504e2,2
+np.float32,0x43058487,0x3f800000,2
+np.float32,0xc3058487,0xbf800000,2
+np.float32,0x43858487,0x36bea8be,2
+np.float32,0xc3858487,0xb6bea8be,2
+np.float32,0x428716a7,0xbf800000,2
+np.float32,0xc28716a7,0x3f800000,2
+np.float32,0x430716a7,0xb5d88c6d,2
+np.float32,0xc30716a7,0x35d88c6d,2
+np.float32,0x438716a7,0x36588c6d,2
+np.float32,0xc38716a7,0xb6588c6d,2
+np.float32,0x4288a8c7,0xbf3504cf,2
+np.float32,0xc288a8c7,0x3f3504cf,2
+np.float32,0x4308a8c7,0xbf800000,2
+np.float32,0xc308a8c7,0x3f800000,2
+np.float32,0x4388a8c7,0xb74b9a96,2
+np.float32,0xc388a8c7,0x374b9a96,2
+np.float32,0x428a3ae7,0x36b08908,2
+np.float32,0xc28a3ae7,0xb6b08908,2
+np.float32,0x430a3ae7,0x37308908,2
+np.float32,0xc30a3ae7,0xb7308908,2
+np.float32,0x438a3ae7,0x37b08908,2
+np.float32,0xc38a3ae7,0xb7b08908,2
+np.float32,0x428bcd06,0x3f3504f2,2
+np.float32,0xc28bcd06,0xbf3504f2,2
+np.float32,0x430bcd06,0x3f800000,2
+np.float32,0xc30bcd06,0xbf800000,2
+np.float32,0x438bcd06,0x3517675b,2
+np.float32,0xc38bcd06,0xb517675b,2
+np.float32,0x428d5f26,0x3f800000,2
+np.float32,0xc28d5f26,0xbf800000,2
+np.float32,0x430d5f26,0xb68c0105,2
+np.float32,0xc30d5f26,0x368c0105,2
+np.float32,0x438d5f26,0x370c0105,2
+np.float32,0xc38d5f26,0xb70c0105,2
+np.float32,0x428ef146,0x3f3504c0,2
+np.float32,0xc28ef146,0xbf3504c0,2
+np.float32,0x430ef146,0xbf800000,2
+np.float32,0xc30ef146,0x3f800000,2
+np.float32,0x438ef146,0xb790bc40,2
+np.float32,0xc38ef146,0x3790bc40,2
+np.float32,0x42908365,0x3592200d,2
+np.float32,0xc2908365,0xb592200d,2
+np.float32,0x43108365,0xb612200d,2
+np.float32,0xc3108365,0x3612200d,2
+np.float32,0x43908365,0xb692200d,2
+np.float32,0xc3908365,0x3692200d,2
+np.float32,0x42921585,0xbf350501,2
+np.float32,0xc2921585,0x3f350501,2
+np.float32,0x43121585,0x3f800000,2
+np.float32,0xc3121585,0xbf800000,2
+np.float32,0x43921585,0xb698cee8,2
+np.float32,0xc3921585,0x3698cee8,2
+np.float32,0x4293a7a5,0xbf800000,2
+np.float32,0xc293a7a5,0x3f800000,2
+np.float32,0x4313a7a5,0xb6e1deee,2
+np.float32,0xc313a7a5,0x36e1deee,2
+np.float32,0x4393a7a5,0x3761deee,2
+np.float32,0xc393a7a5,0xb761deee,2
+np.float32,0x429539c5,0xbf3504b1,2
+np.float32,0xc29539c5,0x3f3504b1,2
+np.float32,0x431539c5,0xbf800000,2
+np.float32,0xc31539c5,0x3f800000,2
+np.float32,0x439539c5,0xb7bbab34,2
+np.float32,0xc39539c5,0x37bbab34,2
+np.float32,0x4296cbe4,0x344cde2e,2
+np.float32,0xc296cbe4,0xb44cde2e,2
+np.float32,0x4316cbe4,0x34ccde2e,2
+np.float32,0xc316cbe4,0xb4ccde2e,2
+np.float32,0x4396cbe4,0x354cde2e,2
+np.float32,0xc396cbe4,0xb54cde2e,2
+np.float32,0x42985e04,0x3f350510,2
+np.float32,0xc2985e04,0xbf350510,2
+np.float32,0x43185e04,0x3f800000,2
+np.float32,0xc3185e04,0xbf800000,2
+np.float32,0x43985e04,0xb722455d,2
+np.float32,0xc3985e04,0x3722455d,2
+np.float32,0x4299f024,0x3f800000,2
+np.float32,0xc299f024,0xbf800000,2
+np.float32,0x4319f024,0xb71bde6c,2
+np.float32,0xc319f024,0x371bde6c,2
+np.float32,0x4399f024,0x379bde6c,2
+np.float32,0xc399f024,0xb79bde6c,2
+np.float32,0x429b8243,0x3f3504fc,2
+np.float32,0xc29b8243,0xbf3504fc,2
+np.float32,0x431b8243,0xbf800000,2
+np.float32,0xc31b8243,0x3f800000,2
+np.float32,0x439b8243,0x364b2eb8,2
+np.float32,0xc39b8243,0xb64b2eb8,2
+np.float32,0x435b2047,0xbf350525,2
+np.float32,0x42a038a2,0xbf800000,2
+np.float32,0x432038a2,0x3664ca7e,2
+np.float32,0x4345eb9b,0x365e638c,2
+np.float32,0x42c5eb9b,0xbf800000,2
+np.float32,0x42eb9e94,0xbf800000,2
+np.float32,0x4350ea79,0x3f800000,2
+np.float32,0x42dbe957,0x3585522a,2
+np.float32,0x425be957,0xbf800000,2
+np.float32,0x435be957,0xb605522a,2
+np.float32,0x487fe5ab,0xbf7ffffd,2
+np.float32,0x497fe5ab,0xbb14017d,2
+np.float32,0x49ffe5ab,0xbb940164,2
+np.float32,0x49ffeb37,0x3f7fff56,2
+np.float32,0x497ff0c3,0x3f7fffd6,2
+np.float32,0x49fff0c3,0x3b930487,2
+np.float32,0x49fff64f,0xbf7fff58,2
+np.float32,0x497ffbdb,0x3b1207c0,2
+np.float32,0x49fffbdb,0xbb9207a9,2
+np.float32,0x48fffbdb,0xbf7ffff6,2
+np.float32,0x4e736e56,0x397fa7f2,2
+np.float32,0x4d4da377,0xb57c64bc,2
+np.float32,0x4ece58c3,0xb80846c8,2
+np.float32,0x4ee0db9c,0x394c4786,2
+np.float32,0x4dee7002,0x381bce96,2
+np.float32,0x4ee86afc,0x3f800000,2
+np.float32,0x4dca4f3f,0xb8e25111,2
+np.float32,0x4ecb48af,0xbf800000,2
+np.float32,0x4e51e33f,0xb8a4fa6f,2
+np.float32,0x4ef5f421,0x387ca7df,2
+np.float32,0x476362a2,0xbd7ff911,2
+np.float32,0x464c99a4,0x3e7f4d41,2
+np.float32,0x4471f73d,0x3e7fe1b0,2
+np.float32,0x445a6752,0x3e7ef367,2
+np.float32,0x474fa400,0x3e7f9fcd,2
+np.float32,0x47c9e70e,0xbb4bba09,2
+np.float32,0x45c1e72f,0xbe7fc7af,2
+np.float32,0x4558c91d,0x3e7e9f31,2
+np.float32,0x43784f94,0xbdff6654,2
+np.float32,0x466e8500,0xbe7ea0a3,2
+np.float32,0x468e1c25,0x3e7e22fb,2
+np.float32,0x47d28adc,0xbe7d5e6b,2
+np.float32,0x44ea6cfc,0x3dff70c3,2
+np.float32,0x4605126c,0x3e7f89ef,2
+np.float32,0x4788b3c6,0xbb87d853,2
+np.float32,0x4531b042,0x3dffd163,2
+np.float32,0x47e46c29,0xbe7def2b,2
+np.float32,0x47c10e07,0xbdff63d4,2
+np.float32,0x43f1f71d,0x3dfff387,2
+np.float32,0x47c3e38c,0x3e7f0b2f,2
+np.float32,0x462c3fa5,0xbd7fe13d,2
+np.float32,0x441c5354,0xbdff76b4,2
+np.float32,0x44908b69,0x3e7dcf0d,2
+np.float32,0x478813ad,0xbe7e9d80,2
+np.float32,0x441c4351,0x3dff937b,2
diff --git a/numpy/core/tests/test__exceptions.py b/numpy/core/tests/test__exceptions.py
new file mode 100644
index 000000000..494b51f34
--- /dev/null
+++ b/numpy/core/tests/test__exceptions.py
@@ -0,0 +1,42 @@
+"""
+Tests of the ._exceptions module. Primarily for exercising the __str__ methods.
+"""
+import numpy as np
+
+_ArrayMemoryError = np.core._exceptions._ArrayMemoryError
+
+class TestArrayMemoryError:
+ def test_str(self):
+ e = _ArrayMemoryError((1023,), np.dtype(np.uint8))
+ str(e) # not crashing is enough
+
+ # testing these properties is easier than testing the full string repr
+ def test__size_to_string(self):
+ """ Test e._size_to_string """
+ f = _ArrayMemoryError._size_to_string
+ Ki = 1024
+ assert f(0) == '0 bytes'
+ assert f(1) == '1 bytes'
+ assert f(1023) == '1023 bytes'
+ assert f(Ki) == '1.00 KiB'
+ assert f(Ki+1) == '1.00 KiB'
+ assert f(10*Ki) == '10.0 KiB'
+ assert f(int(999.4*Ki)) == '999. KiB'
+ assert f(int(1023.4*Ki)) == '1023. KiB'
+ assert f(int(1023.5*Ki)) == '1.00 MiB'
+ assert f(Ki*Ki) == '1.00 MiB'
+
+ # 1023.9999 Mib should round to 1 GiB
+ assert f(int(Ki*Ki*Ki*0.9999)) == '1.00 GiB'
+ assert f(Ki*Ki*Ki*Ki*Ki*Ki) == '1.00 EiB'
+ # larger than sys.maxsize, adding larger prefices isn't going to help
+ # anyway.
+ assert f(Ki*Ki*Ki*Ki*Ki*Ki*123456) == '123456. EiB'
+
+ def test__total_size(self):
+ """ Test e._total_size """
+ e = _ArrayMemoryError((1,), np.dtype(np.uint8))
+ assert e._total_size == 1
+
+ e = _ArrayMemoryError((2, 4), np.dtype((np.uint64, 16)))
+ assert e._total_size == 1024
diff --git a/numpy/core/tests/test_arrayprint.py b/numpy/core/tests/test_arrayprint.py
index f2b8fdca7..702e68e76 100644
--- a/numpy/core/tests/test_arrayprint.py
+++ b/numpy/core/tests/test_arrayprint.py
@@ -262,11 +262,6 @@ class TestArray2String(object):
assert_(np.array2string(s, formatter={'numpystr':lambda s: s*2}) ==
'[abcabc defdef]')
- # check for backcompat that using FloatFormat works and emits warning
- with assert_warns(DeprecationWarning):
- fmt = np.core.arrayprint.FloatFormat(x, 9, 'maxprec', False)
- assert_equal(np.array2string(x, formatter={'float_kind': fmt}),
- '[0. 1. 2.]')
def test_structure_format(self):
dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
@@ -847,9 +842,9 @@ class TestPrintOptions(object):
)
def test_bad_args(self):
- assert_raises(ValueError, np.set_printoptions, threshold='nan')
- assert_raises(ValueError, np.set_printoptions, threshold=u'1')
- assert_raises(ValueError, np.set_printoptions, threshold=b'1')
+ assert_raises(ValueError, np.set_printoptions, threshold=float('nan'))
+ assert_raises(TypeError, np.set_printoptions, threshold='1')
+ assert_raises(TypeError, np.set_printoptions, threshold=b'1')
def test_unicode_object_array():
import sys
diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py
index 6d71fcbd6..b12b71940 100644
--- a/numpy/core/tests/test_deprecations.py
+++ b/numpy/core/tests/test_deprecations.py
@@ -10,12 +10,16 @@ import sys
import operator
import warnings
import pytest
+import shutil
+import tempfile
import numpy as np
from numpy.testing import (
- assert_raises, assert_warns, assert_
+ assert_raises, assert_warns, assert_, assert_array_equal
)
+from numpy.core._multiarray_tests import fromstring_null_term_c_api
+
try:
import pytz
_has_pytz = True
@@ -101,7 +105,7 @@ class _DeprecationTestCase(object):
(self.warning_cls.__name__, warning.category))
if num is not None and num_found != num:
msg = "%i warnings found but %i expected." % (len(self.log), num)
- lst = [str(w.category) for w in self.log]
+ lst = [str(w) for w in self.log]
raise AssertionError("\n".join([msg] + lst))
with warnings.catch_warnings():
@@ -149,16 +153,6 @@ class TestNonTupleNDIndexDeprecation(object):
a[[0, 1]]
-class TestRankDeprecation(_DeprecationTestCase):
- """Test that np.rank is deprecated. The function should simply be
- removed. The VisibleDeprecationWarning may become unnecessary.
- """
-
- def test(self):
- a = np.arange(10)
- assert_warns(np.VisibleDeprecationWarning, np.rank, a)
-
-
class TestComparisonDeprecations(_DeprecationTestCase):
"""This tests the deprecation, for non-element-wise comparison logic.
This used to mean that when an error occurred during element-wise comparison
@@ -452,6 +446,18 @@ class TestNPY_CHAR(_DeprecationTestCase):
assert_(npy_char_deprecation() == 'S1')
+class TestPyArray_AS1D(_DeprecationTestCase):
+ def test_npy_pyarrayas1d_deprecation(self):
+ from numpy.core._multiarray_tests import npy_pyarrayas1d_deprecation
+ assert_raises(NotImplementedError, npy_pyarrayas1d_deprecation)
+
+
+class TestPyArray_AS2D(_DeprecationTestCase):
+ def test_npy_pyarrayas2d_deprecation(self):
+ from numpy.core._multiarray_tests import npy_pyarrayas2d_deprecation
+ assert_raises(NotImplementedError, npy_pyarrayas2d_deprecation)
+
+
class Test_UPDATEIFCOPY(_DeprecationTestCase):
"""
v1.14 deprecates creating an array with the UPDATEIFCOPY flag, use
@@ -499,6 +505,12 @@ class TestBincount(_DeprecationTestCase):
self.assert_deprecated(lambda: np.bincount([1, 2, 3], minlength=None))
+class TestAlen(_DeprecationTestCase):
+ # 2019-08-02, 1.18.0
+ def test_alen(self):
+ self.assert_deprecated(lambda: np.alen(np.array([1, 2, 3])))
+
+
class TestGeneratorSum(_DeprecationTestCase):
# 2018-02-25, 1.15.0
def test_generator_sum(self):
@@ -518,11 +530,65 @@ class TestPositiveOnNonNumerical(_DeprecationTestCase):
def test_positive_on_non_number(self):
self.assert_deprecated(operator.pos, args=(np.array('foo'),))
+
class TestFromstring(_DeprecationTestCase):
# 2017-10-19, 1.14
def test_fromstring(self):
self.assert_deprecated(np.fromstring, args=('\x00'*80,))
+
+class TestFromStringAndFileInvalidData(_DeprecationTestCase):
+ # 2019-06-08, 1.17.0
+ # Tests should be moved to real tests when deprecation is done.
+ message = "string or file could not be read to its end"
+
+ @pytest.mark.parametrize("invalid_str", [",invalid_data", "invalid_sep"])
+ def test_deprecate_unparsable_data_file(self, invalid_str):
+ x = np.array([1.51, 2, 3.51, 4], dtype=float)
+
+ with tempfile.TemporaryFile(mode="w") as f:
+ x.tofile(f, sep=',', format='%.2f')
+ f.write(invalid_str)
+
+ f.seek(0)
+ self.assert_deprecated(lambda: np.fromfile(f, sep=","))
+ f.seek(0)
+ self.assert_deprecated(lambda: np.fromfile(f, sep=",", count=5))
+ # Should not raise:
+ with warnings.catch_warnings():
+ warnings.simplefilter("error", DeprecationWarning)
+ f.seek(0)
+ res = np.fromfile(f, sep=",", count=4)
+ assert_array_equal(res, x)
+
+ @pytest.mark.parametrize("invalid_str", [",invalid_data", "invalid_sep"])
+ def test_deprecate_unparsable_string(self, invalid_str):
+ x = np.array([1.51, 2, 3.51, 4], dtype=float)
+ x_str = "1.51,2,3.51,4{}".format(invalid_str)
+
+ self.assert_deprecated(lambda: np.fromstring(x_str, sep=","))
+ self.assert_deprecated(lambda: np.fromstring(x_str, sep=",", count=5))
+
+ # The C-level API can use not fixed size, but 0 terminated strings,
+ # so test that as well:
+ bytestr = x_str.encode("ascii")
+ self.assert_deprecated(lambda: fromstring_null_term_c_api(bytestr))
+
+ with assert_warns(DeprecationWarning):
+ # this is slightly strange, in that fromstring leaves data
+ # potentially uninitialized (would be good to error when all is
+ # read, but count is larger than actual data maybe).
+ res = np.fromstring(x_str, sep=",", count=5)
+ assert_array_equal(res[:-1], x)
+
+ with warnings.catch_warnings():
+ warnings.simplefilter("error", DeprecationWarning)
+
+ # Should not raise:
+ res = np.fromstring(x_str, sep=",", count=4)
+ assert_array_equal(res, x)
+
+
class Test_GetSet_NumericOps(_DeprecationTestCase):
# 2018-09-20, 1.16.0
def test_get_numeric_ops(self):
diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py
index f60eab696..d2fbbae5b 100644
--- a/numpy/core/tests/test_dtype.py
+++ b/numpy/core/tests/test_dtype.py
@@ -419,6 +419,31 @@ class TestRecord(object):
assert_raises(ValueError, np.dtype,
{'formats': ['i4', 'i4'], 'f0': ('i4', 0), 'f1':('i4', 4)})
+ def test_fieldless_views(self):
+ a = np.zeros(2, dtype={'names':[], 'formats':[], 'offsets':[],
+ 'itemsize':8})
+ assert_raises(ValueError, a.view, np.dtype([]))
+
+ d = np.dtype((np.dtype([]), 10))
+ assert_equal(d.shape, (10,))
+ assert_equal(d.itemsize, 0)
+ assert_equal(d.base, np.dtype([]))
+
+ arr = np.fromiter((() for i in range(10)), [])
+ assert_equal(arr.dtype, np.dtype([]))
+ assert_raises(ValueError, np.frombuffer, b'', dtype=[])
+ assert_equal(np.frombuffer(b'', dtype=[], count=2),
+ np.empty(2, dtype=[]))
+
+ assert_raises(ValueError, np.dtype, ([], 'f8'))
+ assert_raises(ValueError, np.zeros(1, dtype='i4').view, [])
+
+ assert_equal(np.zeros(2, dtype=[]) == np.zeros(2, dtype=[]),
+ np.ones(2, dtype=bool))
+
+ assert_equal(np.zeros((1, 2), dtype=[]) == a,
+ np.ones((1, 2), dtype=bool))
+
class TestSubarray(object):
def test_single_subarray(self):
@@ -938,13 +963,6 @@ class TestDtypeAttributes(object):
new_dtype = np.dtype(dtype.descr)
assert_equal(new_dtype.itemsize, 16)
- @pytest.mark.parametrize('t', np.typeDict.values())
- def test_name_builtin(self, t):
- name = t.__name__
- if name.endswith('_'):
- name = name[:-1]
- assert_equal(np.dtype(t).name, name)
-
def test_name_dtype_subclass(self):
# Ticket #4357
class user_def_subcls(np.void):
diff --git a/numpy/core/tests/test_indexing.py b/numpy/core/tests/test_indexing.py
index f7485c3f7..70a5a246f 100644
--- a/numpy/core/tests/test_indexing.py
+++ b/numpy/core/tests/test_indexing.py
@@ -617,6 +617,19 @@ class TestSubclasses(object):
assert_array_equal(s_bool, a[a > 0])
assert_array_equal(s_bool.base, a[a > 0])
+ def test_fancy_on_read_only(self):
+ # Test that fancy indexing on read-only SubClass does not make a
+ # read-only copy (gh-14132)
+ class SubClass(np.ndarray):
+ pass
+
+ a = np.arange(5)
+ s = a.view(SubClass)
+ s.flags.writeable = False
+ s_fancy = s[[0, 1, 2]]
+ assert_(s_fancy.flags.writeable)
+
+
def test_finalize_gets_full_info(self):
# Array finalize should be called on the filled array.
class SubClass(np.ndarray):
diff --git a/numpy/core/tests/test_longdouble.py b/numpy/core/tests/test_longdouble.py
index 7fb542ee1..8e1c9d153 100644
--- a/numpy/core/tests/test_longdouble.py
+++ b/numpy/core/tests/test_longdouble.py
@@ -5,7 +5,8 @@ import pytest
import numpy as np
from numpy.testing import (
- assert_, assert_equal, assert_raises, assert_array_equal, temppath,
+ assert_, assert_equal, assert_raises, assert_warns, assert_array_equal,
+ temppath,
)
from numpy.core.tests._locales import CommaDecimalPointLocale
@@ -84,18 +85,21 @@ def test_fromstring_complex():
def test_fromstring_bogus():
- assert_equal(np.fromstring("1. 2. 3. flop 4.", dtype=float, sep=" "),
- np.array([1., 2., 3.]))
+ with assert_warns(DeprecationWarning):
+ assert_equal(np.fromstring("1. 2. 3. flop 4.", dtype=float, sep=" "),
+ np.array([1., 2., 3.]))
def test_fromstring_empty():
- assert_equal(np.fromstring("xxxxx", sep="x"),
- np.array([]))
+ with assert_warns(DeprecationWarning):
+ assert_equal(np.fromstring("xxxxx", sep="x"),
+ np.array([]))
def test_fromstring_missing():
- assert_equal(np.fromstring("1xx3x4x5x6", sep="x"),
- np.array([1]))
+ with assert_warns(DeprecationWarning):
+ assert_equal(np.fromstring("1xx3x4x5x6", sep="x"),
+ np.array([1]))
class TestFileBased(object):
@@ -108,7 +112,9 @@ class TestFileBased(object):
with temppath() as path:
with open(path, 'wt') as f:
f.write("1. 2. 3. flop 4.\n")
- res = np.fromfile(path, dtype=float, sep=" ")
+
+ with assert_warns(DeprecationWarning):
+ res = np.fromfile(path, dtype=float, sep=" ")
assert_equal(res, np.array([1., 2., 3.]))
@pytest.mark.skipif(string_to_longdouble_inaccurate,
@@ -199,12 +205,14 @@ class TestCommaDecimalPointLocale(CommaDecimalPointLocale):
assert_equal(a[0], f)
def test_fromstring_best_effort_float(self):
- assert_equal(np.fromstring("1,234", dtype=float, sep=" "),
- np.array([1.]))
+ with assert_warns(DeprecationWarning):
+ assert_equal(np.fromstring("1,234", dtype=float, sep=" "),
+ np.array([1.]))
def test_fromstring_best_effort(self):
- assert_equal(np.fromstring("1,234", dtype=np.longdouble, sep=" "),
- np.array([1.]))
+ with assert_warns(DeprecationWarning):
+ assert_equal(np.fromstring("1,234", dtype=np.longdouble, sep=" "),
+ np.array([1.]))
def test_fromstring_foreign(self):
s = "1.234"
@@ -217,8 +225,10 @@ class TestCommaDecimalPointLocale(CommaDecimalPointLocale):
assert_array_equal(a, b)
def test_fromstring_foreign_value(self):
- b = np.fromstring("1,234", dtype=np.longdouble, sep=" ")
- assert_array_equal(b[0], 1)
+ with assert_warns(DeprecationWarning):
+ b = np.fromstring("1,234", dtype=np.longdouble, sep=" ")
+ assert_array_equal(b[0], 1)
+
@pytest.mark.parametrize("int_val", [
# cases discussed in gh-10723
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index 53e538f7d..58572f268 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -44,7 +44,7 @@ from numpy.testing import (
assert_, assert_raises, assert_warns, assert_equal, assert_almost_equal,
assert_array_equal, assert_raises_regex, assert_array_almost_equal,
assert_allclose, IS_PYPY, HAS_REFCOUNT, assert_array_less, runstring,
- temppath, suppress_warnings, break_cycles, assert_raises_regex,
+ temppath, suppress_warnings, break_cycles,
)
from numpy.core.tests._locales import CommaDecimalPointLocale
@@ -497,9 +497,6 @@ class TestArrayConstruction(object):
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
- def test_ragged(self):
- assert_raises_regex(ValueError, 'ragged',
- np.array, [[1], [2, 3]], dtype=int)
class TestAssignment(object):
def test_assignment_broadcasting(self):
@@ -4590,18 +4587,26 @@ class TestTake(object):
assert_equal(y, np.array([1, 2, 3]))
class TestLexsort(object):
- def test_basic(self):
- a = [1, 2, 1, 3, 1, 5]
- b = [0, 4, 5, 6, 2, 3]
+ @pytest.mark.parametrize('dtype',[
+ np.uint8, np.uint16, np.uint32, np.uint64,
+ np.int8, np.int16, np.int32, np.int64,
+ np.float16, np.float32, np.float64
+ ])
+ def test_basic(self, dtype):
+ a = np.array([1, 2, 1, 3, 1, 5], dtype=dtype)
+ b = np.array([0, 4, 5, 6, 2, 3], dtype=dtype)
idx = np.lexsort((b, a))
expected_idx = np.array([0, 4, 2, 1, 3, 5])
assert_array_equal(idx, expected_idx)
+ assert_array_equal(a[idx], np.sort(a))
- x = np.vstack((b, a))
- idx = np.lexsort(x)
- assert_array_equal(idx, expected_idx)
+ def test_mixed(self):
+ a = np.array([1, 2, 1, 3, 1, 5])
+ b = np.array([0, 4, 5, 6, 2, 3], dtype='datetime64[D]')
- assert_array_equal(x[1][idx], np.sort(x[1]))
+ idx = np.lexsort((b, a))
+ expected_idx = np.array([0, 4, 2, 1, 3, 5])
+ assert_array_equal(idx, expected_idx)
def test_datetime(self):
a = np.array([0,0,0], dtype='datetime64[D]')
@@ -4958,7 +4963,8 @@ class TestIO(object):
self._check_from(b'1,2,3,4', [1., 2., 3., 4.], dtype=float, sep=',')
def test_malformed(self):
- self._check_from(b'1.234 1,234', [1.234, 1.], sep=' ')
+ with assert_warns(DeprecationWarning):
+ self._check_from(b'1.234 1,234', [1.234, 1.], sep=' ')
def test_long_sep(self):
self._check_from(b'1_x_3_x_4_x_5', [1, 3, 4, 5], sep='_x_')
@@ -6271,6 +6277,23 @@ class TestMatmul(MatmulCommon):
with assert_raises(TypeError):
b = np.matmul(a, a)
+ def test_matmul_bool(self):
+ # gh-14439
+ a = np.array([[1, 0],[1, 1]], dtype=bool)
+ assert np.max(a.view(np.uint8)) == 1
+ b = np.matmul(a, a)
+ # matmul with boolean output should always be 0, 1
+ assert np.max(b.view(np.uint8)) == 1
+
+ rg = np.random.default_rng(np.random.PCG64(43))
+ d = rg.integers(2, size=4*5, dtype=np.int8)
+ d = d.reshape(4, 5) > 0
+ out1 = np.matmul(d, d.reshape(5, 4))
+ out2 = np.dot(d, d.reshape(5, 4))
+ assert_equal(out1, out2)
+
+ c = np.matmul(np.zeros((2, 0), dtype=bool), np.zeros(0, dtype=bool))
+ assert not np.any(c)
if sys.version_info[:2] >= (3, 5):
@@ -6405,20 +6428,22 @@ class TestInner(object):
class TestAlen(object):
def test_basic(self):
- m = np.array([1, 2, 3])
- assert_equal(np.alen(m), 3)
+ with pytest.warns(DeprecationWarning):
+ m = np.array([1, 2, 3])
+ assert_equal(np.alen(m), 3)
- m = np.array([[1, 2, 3], [4, 5, 7]])
- assert_equal(np.alen(m), 2)
+ m = np.array([[1, 2, 3], [4, 5, 7]])
+ assert_equal(np.alen(m), 2)
- m = [1, 2, 3]
- assert_equal(np.alen(m), 3)
+ m = [1, 2, 3]
+ assert_equal(np.alen(m), 3)
- m = [[1, 2, 3], [4, 5, 7]]
- assert_equal(np.alen(m), 2)
+ m = [[1, 2, 3], [4, 5, 7]]
+ assert_equal(np.alen(m), 2)
def test_singleton(self):
- assert_equal(np.alen(5), 1)
+ with pytest.warns(DeprecationWarning):
+ assert_equal(np.alen(5), 1)
class TestChoose(object):
@@ -8080,6 +8105,8 @@ class TestWritebackIfCopy(object):
arr_wb[...] = 100
assert_equal(arr, -100)
+ @pytest.mark.leaks_references(
+ reason="increments self in dealloc; ignore since deprecated path.")
def test_dealloc_warning(self):
with suppress_warnings() as sup:
sup.record(RuntimeWarning)
diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py
index 3e85054b7..1358b45e9 100644
--- a/numpy/core/tests/test_numeric.py
+++ b/numpy/core/tests/test_numeric.py
@@ -1341,6 +1341,11 @@ class TestBinaryRepr(object):
exp = '1' + (width - 1) * '0'
assert_equal(np.binary_repr(num, width=width), exp)
+ def test_large_neg_int64(self):
+ # See gh-14289.
+ assert_equal(np.binary_repr(np.int64(-2**62), width=64),
+ '11' + '0'*62)
+
class TestBaseRepr(object):
def test_base3(self):
@@ -2578,6 +2583,30 @@ class TestConvolve(object):
class TestArgwhere(object):
+
+ @pytest.mark.parametrize('nd', [0, 1, 2])
+ def test_nd(self, nd):
+ # get an nd array with multiple elements in every dimension
+ x = np.empty((2,)*nd, bool)
+
+ # none
+ x[...] = False
+ assert_equal(np.argwhere(x).shape, (0, nd))
+
+ # only one
+ x[...] = False
+ x.flat[0] = True
+ assert_equal(np.argwhere(x).shape, (1, nd))
+
+ # all but one
+ x[...] = True
+ x.flat[0] = False
+ assert_equal(np.argwhere(x).shape, (x.size - 1, nd))
+
+ # all
+ x[...] = True
+ assert_equal(np.argwhere(x).shape, (x.size, nd))
+
def test_2D(self):
x = np.arange(6).reshape((2, 3))
assert_array_equal(np.argwhere(x > 1),
diff --git a/numpy/core/tests/test_numerictypes.py b/numpy/core/tests/test_numerictypes.py
index d0ff5578a..387740e35 100644
--- a/numpy/core/tests/test_numerictypes.py
+++ b/numpy/core/tests/test_numerictypes.py
@@ -498,3 +498,32 @@ class TestDocStrings(object):
assert_('int64' in np.int_.__doc__)
elif np.int64 is np.longlong:
assert_('int64' in np.longlong.__doc__)
+
+
+class TestScalarTypeNames:
+ # gh-9799
+
+ numeric_types = [
+ np.byte, np.short, np.intc, np.int_, np.longlong,
+ np.ubyte, np.ushort, np.uintc, np.uint, np.ulonglong,
+ np.half, np.single, np.double, np.longdouble,
+ np.csingle, np.cdouble, np.clongdouble,
+ ]
+
+ def test_names_are_unique(self):
+ # none of the above may be aliases for each other
+ assert len(set(self.numeric_types)) == len(self.numeric_types)
+
+ # names must be unique
+ names = [t.__name__ for t in self.numeric_types]
+ assert len(set(names)) == len(names)
+
+ @pytest.mark.parametrize('t', numeric_types)
+ def test_names_reflect_attributes(self, t):
+ """ Test that names correspond to where the type is under ``np.`` """
+ assert getattr(np, t.__name__) is t
+
+ @pytest.mark.parametrize('t', numeric_types)
+ def test_names_are_undersood_by_dtype(self, t):
+ """ Test the dtype constructor maps names back to the type """
+ assert np.dtype(t.__name__).type is t
diff --git a/numpy/core/tests/test_records.py b/numpy/core/tests/test_records.py
index 14413224e..c1b794145 100644
--- a/numpy/core/tests/test_records.py
+++ b/numpy/core/tests/test_records.py
@@ -444,6 +444,48 @@ class TestRecord(object):
]
arr = np.rec.fromarrays(arrays) # ValueError?
+ @pytest.mark.parametrize('nfields', [0, 1, 2])
+ def test_assign_dtype_attribute(self, nfields):
+ dt = np.dtype([('a', np.uint8), ('b', np.uint8), ('c', np.uint8)][:nfields])
+ data = np.zeros(3, dt).view(np.recarray)
+
+ # the original and resulting dtypes differ on whether they are records
+ assert data.dtype.type == np.record
+ assert dt.type != np.record
+
+ # ensure that the dtype remains a record even when assigned
+ data.dtype = dt
+ assert data.dtype.type == np.record
+
+ @pytest.mark.parametrize('nfields', [0, 1, 2])
+ def test_nested_fields_are_records(self, nfields):
+ """ Test that nested structured types are treated as records too """
+ dt = np.dtype([('a', np.uint8), ('b', np.uint8), ('c', np.uint8)][:nfields])
+ dt_outer = np.dtype([('inner', dt)])
+
+ data = np.zeros(3, dt_outer).view(np.recarray)
+ assert isinstance(data, np.recarray)
+ assert isinstance(data['inner'], np.recarray)
+
+ data0 = data[0]
+ assert isinstance(data0, np.record)
+ assert isinstance(data0['inner'], np.record)
+
+ def test_nested_dtype_padding(self):
+ """ test that trailing padding is preserved """
+ # construct a dtype with padding at the end
+ dt = np.dtype([('a', np.uint8), ('b', np.uint8), ('c', np.uint8)])
+ dt_padded_end = dt[['a', 'b']]
+ assert dt_padded_end.itemsize == dt.itemsize
+
+ dt_outer = np.dtype([('inner', dt_padded_end)])
+
+ data = np.zeros(3, dt_outer).view(np.recarray)
+ assert_equal(data['inner'].dtype, dt_padded_end)
+
+ data0 = data[0]
+ assert_equal(data0['inner'].dtype, dt_padded_end)
+
def test_find_duplicate():
l1 = [1, 2, 3, 4, 5, 6]
diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py
index e564ae300..9dc231deb 100644
--- a/numpy/core/tests/test_regression.py
+++ b/numpy/core/tests/test_regression.py
@@ -436,6 +436,32 @@ class TestRegression(object):
assert_raises(KeyError, np.lexsort, BuggySequence())
+ def test_lexsort_zerolen_custom_strides(self):
+ # Ticket #14228
+ xs = np.array([], dtype='i8')
+ assert xs.strides == (8,)
+ assert np.lexsort((xs,)).shape[0] == 0 # Works
+
+ xs.strides = (16,)
+ assert np.lexsort((xs,)).shape[0] == 0 # Was: MemoryError
+
+ def test_lexsort_zerolen_custom_strides_2d(self):
+ xs = np.array([], dtype='i8')
+
+ xs.shape = (0, 2)
+ xs.strides = (16, 16)
+ assert np.lexsort((xs,), axis=0).shape[0] == 0
+
+ xs.shape = (2, 0)
+ xs.strides = (16, 16)
+ assert np.lexsort((xs,), axis=0).shape[0] == 2
+
+ def test_lexsort_zerolen_element(self):
+ dt = np.dtype([]) # a void dtype with no fields
+ xs = np.empty(4, dt)
+
+ assert np.lexsort((xs,)).shape[0] == xs.shape[0]
+
def test_pickle_py2_bytes_encoding(self):
# Check that arrays and scalars pickled on Py2 are
# unpickleable on Py3 using encoding='bytes'
@@ -468,7 +494,7 @@ class TestRegression(object):
result = pickle.loads(data, encoding='bytes')
assert_equal(result, original)
- if isinstance(result, np.ndarray) and result.dtype.names:
+ if isinstance(result, np.ndarray) and result.dtype.names is not None:
for name in result.dtype.names:
assert_(isinstance(name, str))
@@ -1513,7 +1539,8 @@ class TestRegression(object):
def test_fromstring_crash(self):
# Ticket #1345: the following should not cause a crash
- np.fromstring(b'aa, aa, 1.0', sep=',')
+ with assert_warns(DeprecationWarning):
+ np.fromstring(b'aa, aa, 1.0', sep=',')
def test_ticket_1539(self):
dtypes = [x for x in np.typeDict.values()
@@ -2474,3 +2501,13 @@ class TestRegression(object):
t = T()
#gh-13659, would raise in broadcasting [x=t for x in result]
np.array([t])
+
+ @pytest.mark.skipif(sys.maxsize < 2 ** 31 + 1, reason='overflows 32-bit python')
+ @pytest.mark.skipif(sys.platform == 'win32' and sys.version_info[:2] < (3, 8),
+ reason='overflows on windows, fixed in bpo-16865')
+ def test_to_ctypes(self):
+ #gh-14214
+ arr = np.zeros((2 ** 31 + 1,), 'b')
+ assert arr.size * arr.itemsize > 2 ** 31
+ c_arr = np.ctypeslib.as_ctypes(arr)
+ assert_equal(c_arr._length_, arr.size)
diff --git a/numpy/core/tests/test_scalarmath.py b/numpy/core/tests/test_scalarmath.py
index ebba457e3..854df5590 100644
--- a/numpy/core/tests/test_scalarmath.py
+++ b/numpy/core/tests/test_scalarmath.py
@@ -664,3 +664,31 @@ class TestAbs(object):
def test_numpy_abs(self):
self._test_abs_func(np.abs)
+
+
+class TestBitShifts(object):
+
+ @pytest.mark.parametrize('type_code', np.typecodes['AllInteger'])
+ @pytest.mark.parametrize('op',
+ [operator.rshift, operator.lshift], ids=['>>', '<<'])
+ def test_shift_all_bits(self, type_code, op):
+ """ Shifts where the shift amount is the width of the type or wider """
+ # gh-2449
+ dt = np.dtype(type_code)
+ nbits = dt.itemsize * 8
+ for val in [5, -5]:
+ for shift in [nbits, nbits + 4]:
+ val_scl = dt.type(val)
+ shift_scl = dt.type(shift)
+ res_scl = op(val_scl, shift_scl)
+ if val_scl < 0 and op is operator.rshift:
+ # sign bit is preserved
+ assert_equal(res_scl, -1)
+ else:
+ assert_equal(res_scl, 0)
+
+ # Result on scalars should be the same as on arrays
+ val_arr = np.array([val]*32, dtype=dt)
+ shift_arr = np.array([shift]*32, dtype=dt)
+ res_arr = op(val_arr, shift_arr)
+ assert_equal(res_arr, res_scl)
diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py
index 69fbc35e3..707c690dd 100644
--- a/numpy/core/tests/test_ufunc.py
+++ b/numpy/core/tests/test_ufunc.py
@@ -1933,4 +1933,17 @@ def test_ufunc_noncontiguous(ufunc):
warnings.filterwarnings("always")
res_c = ufunc(*args_c)
res_n = ufunc(*args_n)
- assert_equal(res_c, res_n)
+ if len(out) == 1:
+ res_c = (res_c,)
+ res_n = (res_n,)
+ for c_ar, n_ar in zip(res_c, res_n):
+ dt = c_ar.dtype
+ if np.issubdtype(dt, np.floating):
+ # for floating point results allow a small fuss in comparisons
+ # since different algorithms (libm vs. intrinsics) can be used
+ # for different input strides
+ res_eps = np.finfo(dt).eps
+ tol = 2*res_eps
+ assert_allclose(res_c, res_n, atol=tol, rtol=tol)
+ else:
+ assert_equal(c_ar, n_ar)
diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py
index d2ce74282..ef48fed05 100644
--- a/numpy/core/tests/test_umath.py
+++ b/numpy/core/tests/test_umath.py
@@ -14,7 +14,7 @@ from numpy.testing import (
assert_, assert_equal, assert_raises, assert_raises_regex,
assert_array_equal, assert_almost_equal, assert_array_almost_equal,
assert_array_max_ulp, assert_allclose, assert_no_warnings, suppress_warnings,
- _gen_alignment_data
+ _gen_alignment_data, assert_array_almost_equal_nulp
)
def on_powerpc():
@@ -678,20 +678,49 @@ class TestSpecialFloats(object):
assert_raises(FloatingPointError, np.log, np.float32(-np.inf))
assert_raises(FloatingPointError, np.log, np.float32(-1.0))
-class TestExpLogFloat32(object):
+ def test_sincos_values(self):
+ with np.errstate(all='ignore'):
+ x = [np.nan, np.nan, np.nan, np.nan]
+ y = [np.nan, -np.nan, np.inf, -np.inf]
+ for dt in ['f', 'd', 'g']:
+ xf = np.array(x, dtype=dt)
+ yf = np.array(y, dtype=dt)
+ assert_equal(np.sin(yf), xf)
+ assert_equal(np.cos(yf), xf)
+
+ with np.errstate(invalid='raise'):
+ assert_raises(FloatingPointError, np.sin, np.float32(-np.inf))
+ assert_raises(FloatingPointError, np.sin, np.float32(np.inf))
+ assert_raises(FloatingPointError, np.cos, np.float32(-np.inf))
+ assert_raises(FloatingPointError, np.cos, np.float32(np.inf))
+
+
+class TestSIMDFloat32(object):
def test_exp_float32(self):
np.random.seed(42)
x_f32 = np.float32(np.random.uniform(low=0.0,high=88.1,size=1000000))
x_f64 = np.float64(x_f32)
- assert_array_max_ulp(np.exp(x_f32), np.float32(np.exp(x_f64)), maxulp=2.6)
+ assert_array_max_ulp(np.exp(x_f32), np.float32(np.exp(x_f64)), maxulp=3)
def test_log_float32(self):
np.random.seed(42)
x_f32 = np.float32(np.random.uniform(low=0.0,high=1000,size=1000000))
x_f64 = np.float64(x_f32)
- assert_array_max_ulp(np.log(x_f32), np.float32(np.log(x_f64)), maxulp=3.9)
+ assert_array_max_ulp(np.log(x_f32), np.float32(np.log(x_f64)), maxulp=4)
+
+ def test_sincos_float32(self):
+ np.random.seed(42)
+ N = 1000000
+ M = np.int(N/20)
+ index = np.random.randint(low=0, high=N, size=M)
+ x_f32 = np.float32(np.random.uniform(low=-100.,high=100.,size=N))
+ # test coverage for elements > 117435.992f for which glibc is used
+ x_f32[index] = np.float32(10E+10*np.random.rand(M))
+ x_f64 = np.float64(x_f32)
+ assert_array_max_ulp(np.sin(x_f32), np.float32(np.sin(x_f64)), maxulp=2)
+ assert_array_max_ulp(np.cos(x_f32), np.float32(np.cos(x_f64)), maxulp=2)
- def test_strided_exp_log_float32(self):
+ def test_strided_float32(self):
np.random.seed(42)
strides = np.random.randint(low=-100, high=100, size=100)
sizes = np.random.randint(low=1, high=2000, size=100)
@@ -699,9 +728,13 @@ class TestExpLogFloat32(object):
x_f32 = np.float32(np.random.uniform(low=0.01,high=88.1,size=ii))
exp_true = np.exp(x_f32)
log_true = np.log(x_f32)
+ sin_true = np.sin(x_f32)
+ cos_true = np.cos(x_f32)
for jj in strides:
- assert_equal(np.exp(x_f32[::jj]), exp_true[::jj])
- assert_equal(np.log(x_f32[::jj]), log_true[::jj])
+ assert_array_almost_equal_nulp(np.exp(x_f32[::jj]), exp_true[::jj], nulp=2)
+ assert_array_almost_equal_nulp(np.log(x_f32[::jj]), log_true[::jj], nulp=2)
+ assert_array_almost_equal_nulp(np.sin(x_f32[::jj]), sin_true[::jj], nulp=2)
+ assert_array_almost_equal_nulp(np.cos(x_f32[::jj]), cos_true[::jj], nulp=2)
class TestLogAddExp(_FilterInvalids):
def test_logaddexp_values(self):
diff --git a/numpy/core/tests/test_umath_accuracy.py b/numpy/core/tests/test_umath_accuracy.py
new file mode 100644
index 000000000..0bab04df2
--- /dev/null
+++ b/numpy/core/tests/test_umath_accuracy.py
@@ -0,0 +1,54 @@
+import numpy as np
+import platform
+from os import path
+import sys
+import pytest
+from ctypes import *
+from numpy.testing import assert_array_max_ulp
+
+runtest = sys.platform.startswith('linux') and (platform.machine() == 'x86_64')
+platform_skip = pytest.mark.skipif(not runtest,
+ reason="""
+ stick to x86_64 and linux platforms.
+ test seems to fail on some of ARM and power
+ architectures.
+ """)
+
+# convert string to hex function taken from:
+# https://stackoverflow.com/questions/1592158/convert-hex-to-float #
+def convert(s):
+ i = int(s, 16) # convert from hex to a Python int
+ cp = pointer(c_int(i)) # make this into a c integer
+ fp = cast(cp, POINTER(c_float)) # cast the int pointer to a float pointer
+ return fp.contents.value # dereference the pointer, get the float
+
+str_to_float = np.vectorize(convert)
+files = ['umath-validation-set-exp',
+ 'umath-validation-set-log',
+ 'umath-validation-set-sin',
+ 'umath-validation-set-cos']
+
+class TestAccuracy(object):
+ @pytest.mark.xfail(reason="Fails for MacPython/numpy-wheels builds")
+ def test_validate_transcendentals(self):
+ with np.errstate(all='ignore'):
+ for filename in files:
+ data_dir = path.join(path.dirname(__file__), 'data')
+ filepath = path.join(data_dir, filename)
+ with open(filepath) as fid:
+ file_without_comments = (r for r in fid if not r[0] in ('$', '#'))
+ data = np.genfromtxt(file_without_comments,
+ dtype=('|S39','|S39','|S39',np.int),
+ names=('type','input','output','ulperr'),
+ delimiter=',',
+ skip_header=1)
+ npfunc = getattr(np, filename.split('-')[3])
+ for datatype in np.unique(data['type']):
+ data_subset = data[data['type'] == datatype]
+ inval = np.array(str_to_float(data_subset['input'].astype(str)), dtype=eval(datatype))
+ outval = np.array(str_to_float(data_subset['output'].astype(str)), dtype=eval(datatype))
+ perm = np.random.permutation(len(inval))
+ inval = inval[perm]
+ outval = outval[perm]
+ maxulperr = data_subset['ulperr'].max()
+ assert_array_max_ulp(npfunc(inval), outval, maxulperr)
diff --git a/numpy/ctypeslib.py b/numpy/ctypeslib.py
index 1f842d003..58f3ef9d3 100644
--- a/numpy/ctypeslib.py
+++ b/numpy/ctypeslib.py
@@ -92,11 +92,11 @@ else:
# Adapted from Albert Strasheim
def load_library(libname, loader_path):
"""
- It is possible to load a library using
+ It is possible to load a library using
>>> lib = ctypes.cdll[<full_path_name>] # doctest: +SKIP
But there are cross-platform considerations, such as library file extensions,
- plus the fact Windows will just load the first library it finds with that name.
+ plus the fact Windows will just load the first library it finds with that name.
NumPy supplies the load_library function as a convenience.
Parameters
@@ -110,12 +110,12 @@ else:
Returns
-------
ctypes.cdll[libpath] : library object
- A ctypes library object
+ A ctypes library object
Raises
------
OSError
- If there is no library with the expected extension, or the
+ If there is no library with the expected extension, or the
library is defective and cannot be loaded.
"""
if ctypes.__version__ < '1.0.1':
@@ -321,7 +321,7 @@ def ndpointer(dtype=None, ndim=None, shape=None, flags=None):
# produce a name for the new type
if dtype is None:
name = 'any'
- elif dtype.names:
+ elif dtype.names is not None:
name = str(id(dtype))
else:
name = dtype.str
@@ -535,7 +535,10 @@ if ctypes is not None:
if readonly:
raise TypeError("readonly arrays unsupported")
- dtype = _dtype((ai["typestr"], ai["shape"]))
- result = as_ctypes_type(dtype).from_address(addr)
+ # can't use `_dtype((ai["typestr"], ai["shape"]))` here, as it overflows
+ # dtype.itemsize (gh-14214)
+ ctype_scalar = as_ctypes_type(ai["typestr"])
+ result_type = _ctype_ndarray(ctype_scalar, ai["shape"])
+ result = result_type.from_address(addr)
result.__keep = obj
return result
diff --git a/numpy/distutils/__init__.py b/numpy/distutils/__init__.py
index 55514750e..8dbb63b28 100644
--- a/numpy/distutils/__init__.py
+++ b/numpy/distutils/__init__.py
@@ -1,12 +1,31 @@
+"""
+An enhanced distutils, providing support for Fortran compilers, for BLAS,
+LAPACK and other common libraries for numerical computing, and more.
+
+Public submodules are::
+
+ misc_util
+ system_info
+ cpu_info
+ log
+ exec_command
+
+For details, please see the *Packaging* and *NumPy Distutils User Guide*
+sections of the NumPy Reference Guide.
+
+For configuring the preference for and location of libraries like BLAS and
+LAPACK, and for setting include paths and similar build options, please see
+``site.cfg.example`` in the root of the NumPy repository or sdist.
+
+"""
+
from __future__ import division, absolute_import, print_function
-from .__version__ import version as __version__
# Must import local ccompiler ASAP in order to get
# customized CCompiler.spawn effective.
from . import ccompiler
from . import unixccompiler
-from .info import __doc__
from .npy_pkg_config import *
# If numpy is installed, add distutils.test()
@@ -28,7 +47,7 @@ def customized_fcompiler(plat=None, compiler=None):
c.customize()
return c
-def customized_ccompiler(plat=None, compiler=None):
- c = ccompiler.new_compiler(plat=plat, compiler=compiler)
+def customized_ccompiler(plat=None, compiler=None, verbose=1):
+ c = ccompiler.new_compiler(plat=plat, compiler=compiler, verbose=verbose)
c.customize('')
return c
diff --git a/numpy/distutils/__version__.py b/numpy/distutils/__version__.py
deleted file mode 100644
index 969decbba..000000000
--- a/numpy/distutils/__version__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-from __future__ import division, absolute_import, print_function
-
-major = 0
-minor = 4
-micro = 0
-version = '%(major)d.%(minor)d.%(micro)d' % (locals())
diff --git a/numpy/distutils/ccompiler.py b/numpy/distutils/ccompiler.py
index 14451fa66..643879023 100644
--- a/numpy/distutils/ccompiler.py
+++ b/numpy/distutils/ccompiler.py
@@ -140,7 +140,10 @@ def CCompiler_spawn(self, cmd, display=None):
display = ' '.join(list(display))
log.info(display)
try:
- subprocess.check_output(cmd)
+ if self.verbose:
+ subprocess.check_output(cmd)
+ else:
+ subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as exc:
o = exc.output
s = exc.returncode
@@ -162,7 +165,8 @@ def CCompiler_spawn(self, cmd, display=None):
if is_sequence(cmd):
cmd = ' '.join(list(cmd))
- forward_bytes_to_stdout(o)
+ if self.verbose:
+ forward_bytes_to_stdout(o)
if re.search(b'Too many open files', o):
msg = '\nTry rerunning setup command until build succeeds.'
@@ -727,10 +731,12 @@ if sys.platform == 'win32':
_distutils_new_compiler = new_compiler
def new_compiler (plat=None,
compiler=None,
- verbose=0,
+ verbose=None,
dry_run=0,
force=0):
# Try first C compilers from numpy.distutils.
+ if verbose is None:
+ verbose = log.get_threshold() <= log.INFO
if plat is None:
plat = os.name
try:
@@ -763,6 +769,7 @@ def new_compiler (plat=None,
raise DistutilsModuleError(("can't compile C/C++ code: unable to find class '%s' " +
"in module '%s'") % (class_name, module_name))
compiler = klass(None, dry_run, force)
+ compiler.verbose = verbose
log.debug('new_compiler returns %s' % (klass))
return compiler
diff --git a/numpy/distutils/command/build.py b/numpy/distutils/command/build.py
index 3d7101582..5a9da1217 100644
--- a/numpy/distutils/command/build.py
+++ b/numpy/distutils/command/build.py
@@ -16,8 +16,8 @@ class build(old_build):
user_options = old_build.user_options + [
('fcompiler=', None,
"specify the Fortran compiler type"),
- ('parallel=', 'j',
- "number of parallel jobs"),
+ ('warn-error', None,
+ "turn all warnings into errors (-Werror)"),
]
help_options = old_build.help_options + [
@@ -28,17 +28,12 @@ class build(old_build):
def initialize_options(self):
old_build.initialize_options(self)
self.fcompiler = None
- self.parallel = None
+ self.warn_error = False
def finalize_options(self):
- if self.parallel:
- try:
- self.parallel = int(self.parallel)
- except ValueError:
- raise ValueError("--parallel/-j argument must be an integer")
build_scripts = self.build_scripts
old_build.finalize_options(self)
- plat_specifier = ".%s-%s" % (get_platform(), sys.version[0:3])
+ plat_specifier = ".{}-{}.{}".format(get_platform(), *sys.version_info[:2])
if build_scripts is None:
self.build_scripts = os.path.join(self.build_base,
'scripts' + plat_specifier)
diff --git a/numpy/distutils/command/build_clib.py b/numpy/distutils/command/build_clib.py
index 910493a77..13edf0717 100644
--- a/numpy/distutils/command/build_clib.py
+++ b/numpy/distutils/command/build_clib.py
@@ -33,15 +33,18 @@ class build_clib(old_build_clib):
('inplace', 'i', 'Build in-place'),
('parallel=', 'j',
"number of parallel jobs"),
+ ('warn-error', None,
+ "turn all warnings into errors (-Werror)"),
]
- boolean_options = old_build_clib.boolean_options + ['inplace']
+ boolean_options = old_build_clib.boolean_options + ['inplace', 'warn-error']
def initialize_options(self):
old_build_clib.initialize_options(self)
self.fcompiler = None
self.inplace = 0
self.parallel = None
+ self.warn_error = None
def finalize_options(self):
if self.parallel:
@@ -50,7 +53,10 @@ class build_clib(old_build_clib):
except ValueError:
raise ValueError("--parallel/-j argument must be an integer")
old_build_clib.finalize_options(self)
- self.set_undefined_options('build', ('parallel', 'parallel'))
+ self.set_undefined_options('build',
+ ('parallel', 'parallel'),
+ ('warn_error', 'warn_error'),
+ )
def have_f_sources(self):
for (lib_name, build_info) in self.libraries:
@@ -86,6 +92,10 @@ class build_clib(old_build_clib):
self.compiler.customize(self.distribution,
need_cxx=self.have_cxx_sources())
+ if self.warn_error:
+ self.compiler.compiler.append('-Werror')
+ self.compiler.compiler_so.append('-Werror')
+
libraries = self.libraries
self.libraries = None
self.compiler.customize_cmd(self)
diff --git a/numpy/distutils/command/build_ext.py b/numpy/distutils/command/build_ext.py
index ef54fb25e..cd9b1c6f1 100644
--- a/numpy/distutils/command/build_ext.py
+++ b/numpy/distutils/command/build_ext.py
@@ -33,6 +33,8 @@ class build_ext (old_build_ext):
"specify the Fortran compiler type"),
('parallel=', 'j',
"number of parallel jobs"),
+ ('warn-error', None,
+ "turn all warnings into errors (-Werror)"),
]
help_options = old_build_ext.help_options + [
@@ -40,10 +42,13 @@ class build_ext (old_build_ext):
show_fortran_compilers),
]
+ boolean_options = old_build_ext.boolean_options + ['warn-error']
+
def initialize_options(self):
old_build_ext.initialize_options(self)
self.fcompiler = None
self.parallel = None
+ self.warn_error = None
def finalize_options(self):
if self.parallel:
@@ -69,7 +74,10 @@ class build_ext (old_build_ext):
self.include_dirs.extend(incl_dirs)
old_build_ext.finalize_options(self)
- self.set_undefined_options('build', ('parallel', 'parallel'))
+ self.set_undefined_options('build',
+ ('parallel', 'parallel'),
+ ('warn_error', 'warn_error'),
+ )
def run(self):
if not self.extensions:
@@ -116,6 +124,11 @@ class build_ext (old_build_ext):
force=self.force)
self.compiler.customize(self.distribution)
self.compiler.customize_cmd(self)
+
+ if self.warn_error:
+ self.compiler.compiler.append('-Werror')
+ self.compiler.compiler_so.append('-Werror')
+
self.compiler.show_customization()
# Setup directory for storing generated extra DLL files on Windows
diff --git a/numpy/distutils/command/build_src.py b/numpy/distutils/command/build_src.py
index 41bb01da5..af8cec08a 100644
--- a/numpy/distutils/command/build_src.py
+++ b/numpy/distutils/command/build_src.py
@@ -53,9 +53,12 @@ class build_src(build_ext.build_ext):
('inplace', 'i',
"ignore build-lib and put compiled extensions into the source " +
"directory alongside your pure Python modules"),
+ ('verbose', 'v',
+ "change logging level from WARN to INFO which will show all " +
+ "compiler output")
]
- boolean_options = ['force', 'inplace']
+ boolean_options = ['force', 'inplace', 'verbose']
help_options = []
@@ -76,6 +79,7 @@ class build_src(build_ext.build_ext):
self.swig_opts = None
self.swig_cpp = None
self.swig = None
+ self.verbose = None
def finalize_options(self):
self.set_undefined_options('build',
@@ -90,7 +94,7 @@ class build_src(build_ext.build_ext):
self.data_files = self.distribution.data_files or []
if self.build_src is None:
- plat_specifier = ".%s-%s" % (get_platform(), sys.version[0:3])
+ plat_specifier = ".{}-{}.{}".format(get_platform(), *sys.version_info[:2])
self.build_src = os.path.join(self.build_base, 'src'+plat_specifier)
# py_modules_dict is used in build_py.find_package_modules
@@ -365,6 +369,13 @@ class build_src(build_ext.build_ext):
build_dir = os.path.join(*([self.build_src]
+name.split('.')[:-1]))
self.mkpath(build_dir)
+
+ if self.verbose:
+ new_level = log.INFO
+ else:
+ new_level = log.WARN
+ old_level = log.set_threshold(new_level)
+
for func in func_sources:
source = func(extension, build_dir)
if not source:
@@ -375,7 +386,7 @@ class build_src(build_ext.build_ext):
else:
log.info(" adding '%s' to sources." % (source,))
new_sources.append(source)
-
+ log.set_threshold(old_level)
return new_sources
def filter_py_files(self, sources):
diff --git a/numpy/distutils/fcompiler/environment.py b/numpy/distutils/fcompiler/environment.py
index 73a5e98e1..bb362d483 100644
--- a/numpy/distutils/fcompiler/environment.py
+++ b/numpy/distutils/fcompiler/environment.py
@@ -59,17 +59,13 @@ class EnvironmentConfig(object):
if envvar_contents is not None:
envvar_contents = convert(envvar_contents)
if var and append:
- if os.environ.get('NPY_DISTUTILS_APPEND_FLAGS', '0') == '1':
+ if os.environ.get('NPY_DISTUTILS_APPEND_FLAGS', '1') == '1':
var.extend(envvar_contents)
else:
+ # NPY_DISTUTILS_APPEND_FLAGS was explicitly set to 0
+ # to keep old (overwrite flags rather than append to
+ # them) behavior
var = envvar_contents
- if 'NPY_DISTUTILS_APPEND_FLAGS' not in os.environ.keys():
- msg = "{} is used as is, not appended ".format(envvar) + \
- "to flags already defined " + \
- "by numpy.distutils! Use NPY_DISTUTILS_APPEND_FLAGS=1 " + \
- "to obtain appending behavior instead (this " + \
- "behavior will become default in a future release)."
- warnings.warn(msg, UserWarning, stacklevel=3)
else:
var = envvar_contents
if confvar is not None and self._conf:
diff --git a/numpy/distutils/info.py b/numpy/distutils/info.py
deleted file mode 100644
index 2f5310665..000000000
--- a/numpy/distutils/info.py
+++ /dev/null
@@ -1,6 +0,0 @@
-"""
-Enhanced distutils with Fortran compilers support and more.
-"""
-from __future__ import division, absolute_import, print_function
-
-postpone_import = True
diff --git a/numpy/distutils/log.py b/numpy/distutils/log.py
index 37f9fe5dd..ff7de86b1 100644
--- a/numpy/distutils/log.py
+++ b/numpy/distutils/log.py
@@ -67,6 +67,8 @@ def set_threshold(level, force=False):
' %s to %s' % (prev_level, level))
return prev_level
+def get_threshold():
+ return _global_log.threshold
def set_verbosity(v, force=False):
prev_level = _global_log.threshold
diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py
index 89171eede..7ba8ad862 100644
--- a/numpy/distutils/misc_util.py
+++ b/numpy/distutils/misc_util.py
@@ -859,7 +859,7 @@ class Configuration(object):
print(message)
def warn(self, message):
- sys.stderr.write('Warning: %s' % (message,))
+ sys.stderr.write('Warning: %s\n' % (message,))
def set_options(self, **options):
"""
@@ -1687,6 +1687,41 @@ class Configuration(object):
and will be installed as foo.ini in the 'lib' subpath.
+ When cross-compiling with numpy distutils, it might be necessary to
+ use modified npy-pkg-config files. Using the default/generated files
+ will link with the host libraries (i.e. libnpymath.a). For
+        cross-compilation you of course need to link with target libraries,
+ while using the host Python installation.
+
+ You can copy out the numpy/core/lib/npy-pkg-config directory, add a
+ pkgdir value to the .ini files and set NPY_PKG_CONFIG_PATH environment
+ variable to point to the directory with the modified npy-pkg-config
+ files.
+
+ Example npymath.ini modified for cross-compilation::
+
+ [meta]
+ Name=npymath
+ Description=Portable, core math library implementing C99 standard
+ Version=0.1
+
+ [variables]
+ pkgname=numpy.core
+ pkgdir=/build/arm-linux-gnueabi/sysroot/usr/lib/python3.7/site-packages/numpy/core
+ prefix=${pkgdir}
+ libdir=${prefix}/lib
+ includedir=${prefix}/include
+
+ [default]
+ Libs=-L${libdir} -lnpymath
+ Cflags=-I${includedir}
+ Requires=mlib
+
+ [msvc]
+ Libs=/LIBPATH:${libdir} npymath.lib
+ Cflags=/INCLUDE:${includedir}
+ Requires=mlib
+
"""
if subst_dict is None:
subst_dict = {}
@@ -2092,9 +2127,22 @@ def get_numpy_include_dirs():
return include_dirs
def get_npy_pkg_dir():
- """Return the path where to find the npy-pkg-config directory."""
+ """Return the path where to find the npy-pkg-config directory.
+
+ If the NPY_PKG_CONFIG_PATH environment variable is set, the value of that
+ is returned. Otherwise, a path inside the location of the numpy module is
+ returned.
+
+ The NPY_PKG_CONFIG_PATH can be useful when cross-compiling, maintaining
+ customized npy-pkg-config .ini files for the cross-compilation
+ environment, and using them when cross-compiling.
+
+ """
# XXX: import here for bootstrapping reasons
import numpy
+ d = os.environ.get('NPY_PKG_CONFIG_PATH')
+ if d is not None:
+ return d
d = os.path.join(os.path.dirname(numpy.__file__),
'core', 'lib', 'npy-pkg-config')
return d
diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py
index 2ff0ba7b3..5fd1003ab 100644
--- a/numpy/distutils/system_info.py
+++ b/numpy/distutils/system_info.py
@@ -146,7 +146,7 @@ else:
from distutils.errors import DistutilsError
from distutils.dist import Distribution
import distutils.sysconfig
-from distutils import log
+from numpy.distutils import log
from distutils.util import get_platform
from numpy.distutils.exec_command import (
@@ -456,7 +456,7 @@ class AliasedOptionError(DistutilsError):
class AtlasNotFoundError(NotFoundError):
"""
- Atlas (http://math-atlas.sourceforge.net/) libraries not found.
+ Atlas (http://github.com/math-atlas/math-atlas) libraries not found.
Directories to search for the libraries can be specified in the
numpy/distutils/site.cfg file (section [atlas]) or by setting
the ATLAS environment variable."""
@@ -550,7 +550,6 @@ class system_info(object):
dir_env_var = None
search_static_first = 0 # XXX: disabled by default, may disappear in
# future unless it is proved to be useful.
- verbosity = 1
saved_results = {}
notfounderror = NotFoundError
@@ -558,7 +557,6 @@ class system_info(object):
def __init__(self,
default_lib_dirs=default_lib_dirs,
default_include_dirs=default_include_dirs,
- verbosity=1,
):
self.__class__.info = {}
self.local_prefixes = []
@@ -704,7 +702,7 @@ class system_info(object):
log.info(' FOUND:')
res = self.saved_results.get(self.__class__.__name__)
- if self.verbosity > 0 and flag:
+ if log.get_threshold() <= log.INFO and flag:
for k, v in res.items():
v = str(v)
if k in ['sources', 'libraries'] and len(v) > 270:
@@ -914,7 +912,7 @@ class system_info(object):
"""Return a list of existing paths composed by all combinations
of items from the arguments.
"""
- return combine_paths(*args, **{'verbosity': self.verbosity})
+ return combine_paths(*args)
class fft_opt_info(system_info):
@@ -1531,12 +1529,12 @@ def get_atlas_version(**config):
try:
s, o = c.get_output(atlas_version_c_text,
libraries=libraries, library_dirs=library_dirs,
- use_tee=(system_info.verbosity > 0))
+ )
if s and re.search(r'undefined reference to `_gfortran', o, re.M):
s, o = c.get_output(atlas_version_c_text,
libraries=libraries + ['gfortran'],
library_dirs=library_dirs,
- use_tee=(system_info.verbosity > 0))
+ )
if not s:
warnings.warn(textwrap.dedent("""
*****************************************************
diff --git a/numpy/distutils/tests/test_fcompiler.py b/numpy/distutils/tests/test_fcompiler.py
index ba19a97ea..6d245fbd4 100644
--- a/numpy/distutils/tests/test_fcompiler.py
+++ b/numpy/distutils/tests/test_fcompiler.py
@@ -45,37 +45,3 @@ def test_fcompiler_flags(monkeypatch):
else:
assert_(new_flags == prev_flags + [new_flag])
-
-def test_fcompiler_flags_append_warning(monkeypatch):
- # Test to check that the warning for append behavior changing in future
- # is triggered. Need to use a real compiler instance so that we have
- # non-empty flags to start with (otherwise the "if var and append" check
- # will always be false).
- try:
- with suppress_warnings() as sup:
- sup.record()
- fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95')
- fc.customize()
- except numpy.distutils.fcompiler.CompilerNotFound:
- pytest.skip("gfortran not found, so can't execute this test")
-
- # Ensure NPY_DISTUTILS_APPEND_FLAGS not defined
- monkeypatch.delenv('NPY_DISTUTILS_APPEND_FLAGS', raising=False)
-
- for opt, envvar in customizable_flags:
- new_flag = '-dummy-{}-flag'.format(opt)
- with suppress_warnings() as sup:
- sup.record()
- prev_flags = getattr(fc.flag_vars, opt)
-
- monkeypatch.setenv(envvar, new_flag)
- with suppress_warnings() as sup:
- sup.record()
- new_flags = getattr(fc.flag_vars, opt)
- if prev_flags:
- # Check that warning was issued
- assert len(sup.log) == 1
-
- monkeypatch.delenv(envvar)
- assert_(new_flags == [new_flag])
-
diff --git a/numpy/doc/broadcasting.py b/numpy/doc/broadcasting.py
index f7bd2515b..cb548a0d0 100644
--- a/numpy/doc/broadcasting.py
+++ b/numpy/doc/broadcasting.py
@@ -61,8 +61,7 @@ dimensions are compatible when
If these conditions are not met, a
``ValueError: operands could not be broadcast together`` exception is
thrown, indicating that the arrays have incompatible shapes. The size of
-the resulting array is the maximum size along each dimension of the input
-arrays.
+the resulting array is the size that is not 1 along each axis of the inputs.
Arrays do not need to have the same *number* of dimensions. For example,
if you have a ``256x256x3`` array of RGB values, and you want to scale
diff --git a/numpy/doc/dispatch.py b/numpy/doc/dispatch.py
index 09a3e5134..c9029941b 100644
--- a/numpy/doc/dispatch.py
+++ b/numpy/doc/dispatch.py
@@ -72,7 +72,7 @@ The ``__array_ufunc__`` receives:
- ``inputs``, which could be a mixture of different types
- ``kwargs``, keyword arguments passed to the function
-For this example we will only handle the method ``'__call__``.
+For this example we will only handle the method ``__call__``.
>>> from numbers import Number
>>> class DiagonalArray:
@@ -218,12 +218,12 @@ For completeness, to support the usage ``arr.sum()`` add a method ``sum`` that
calls ``numpy.sum(self)``, and the same for ``mean``.
>>> @implements(np.sum)
-... def sum(a):
+... def sum(arr):
... "Implementation of np.sum for DiagonalArray objects"
... return arr._i * arr._N
...
>>> @implements(np.mean)
-... def sum(a):
+... def mean(arr):
... "Implementation of np.mean for DiagonalArray objects"
... return arr._i / arr._N
...
diff --git a/numpy/doc/subclassing.py b/numpy/doc/subclassing.py
index 4b983893a..d0685328e 100644
--- a/numpy/doc/subclassing.py
+++ b/numpy/doc/subclassing.py
@@ -118,7 +118,8 @@ For example, consider the following Python code:
def __new__(cls, *args):
print('Cls in __new__:', cls)
print('Args in __new__:', args)
- return object.__new__(cls, *args)
+ # The `object` type __new__ method takes a single argument.
+ return object.__new__(cls)
def __init__(self, *args):
print('type(self) in __init__:', type(self))
diff --git a/numpy/f2py/cfuncs.py b/numpy/f2py/cfuncs.py
index d59b6301c..17f3861ca 100644
--- a/numpy/f2py/cfuncs.py
+++ b/numpy/f2py/cfuncs.py
@@ -542,7 +542,7 @@ cppmacros[
'ARRSIZE'] = '#define ARRSIZE(dims,rank) (_PyArray_multiply_list(dims,rank))'
cppmacros['OLDPYNUM'] = """\
#ifdef OLDPYNUM
-#error You need to install Numeric Python version 13 or higher. Get it from http:/sourceforge.net/project/?group_id=1369
+#error You need to install NumPy version 13 or higher. See https://scipy.org/install.html
#endif
"""
################# C functions ###############
diff --git a/numpy/f2py/info.py b/numpy/f2py/info.py
deleted file mode 100644
index c895c5de2..000000000
--- a/numpy/f2py/info.py
+++ /dev/null
@@ -1,6 +0,0 @@
-"""Fortran to Python Interface Generator.
-
-"""
-from __future__ import division, absolute_import, print_function
-
-postpone_import = True
diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py
index 6769f1b1f..1b41498ea 100644..100755
--- a/numpy/f2py/rules.py
+++ b/numpy/f2py/rules.py
@@ -202,7 +202,7 @@ PyMODINIT_FUNC PyInit_#modulename#(void) {
PyMODINIT_FUNC init#modulename#(void) {
#endif
\tint i;
-\tPyObject *m,*d, *s;
+\tPyObject *m,*d, *s, *tmp;
#if PY_VERSION_HEX >= 0x03000000
\tm = #modulename#_module = PyModule_Create(&moduledef);
#else
@@ -224,8 +224,11 @@ PyMODINIT_FUNC init#modulename#(void) {
\tPyDict_SetItemString(d, \"__doc__\", s);
\t#modulename#_error = PyErr_NewException (\"#modulename#.error\", NULL, NULL);
\tPy_DECREF(s);
-\tfor(i=0;f2py_routine_defs[i].name!=NULL;i++)
-\t\tPyDict_SetItemString(d, f2py_routine_defs[i].name,PyFortranObject_NewAsAttr(&f2py_routine_defs[i]));
+\tfor(i=0;f2py_routine_defs[i].name!=NULL;i++) {
+\t\ttmp = PyFortranObject_NewAsAttr(&f2py_routine_defs[i]);
+\t\tPyDict_SetItemString(d, f2py_routine_defs[i].name, tmp);
+\t\tPy_DECREF(tmp);
+\t}
#initf2pywraphooks#
#initf90modhooks#
#initcommonhooks#
diff --git a/numpy/f2py/setup.py b/numpy/f2py/setup.py
index c0c50ce54..a8c1401aa 100644
--- a/numpy/f2py/setup.py
+++ b/numpy/f2py/setup.py
@@ -3,7 +3,7 @@
setup.py for installing F2PY
Usage:
- python setup.py install
+ pip install .
Copyright 2001-2005 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@cens.ioc.ee>
diff --git a/numpy/f2py/src/fortranobject.c b/numpy/f2py/src/fortranobject.c
index 4a981bf55..b55385b50 100644
--- a/numpy/f2py/src/fortranobject.c
+++ b/numpy/f2py/src/fortranobject.c
@@ -80,7 +80,10 @@ PyFortranObject_NewAsAttr(FortranDataDef* defs) { /* used for calling F90 module
PyFortranObject *fp = NULL;
fp = PyObject_New(PyFortranObject, &PyFortran_Type);
if (fp == NULL) return NULL;
- if ((fp->dict = PyDict_New())==NULL) return NULL;
+ if ((fp->dict = PyDict_New())==NULL) {
+ PyObject_Del(fp);
+ return NULL;
+ }
fp->len = 1;
fp->defs = defs;
return (PyObject *)fp;
@@ -91,7 +94,7 @@ PyFortranObject_NewAsAttr(FortranDataDef* defs) { /* used for calling F90 module
static void
fortran_dealloc(PyFortranObject *fp) {
Py_XDECREF(fp->dict);
- PyMem_Del(fp);
+ PyObject_Del(fp);
}
diff --git a/numpy/fft/README.md b/numpy/fft/README.md
index 7040a2e9b..f79188139 100644
--- a/numpy/fft/README.md
+++ b/numpy/fft/README.md
@@ -10,11 +10,6 @@ advantages:
- worst case complexity for transform sizes with large prime factors is
`N*log(N)`, because Bluestein's algorithm [3] is used for these cases.
-License
--------
-
-3-clause BSD (see LICENSE.md)
-
Some code details
-----------------
diff --git a/numpy/fft/__init__.py b/numpy/fft/__init__.py
index 64b35bc19..fe95d8b17 100644
--- a/numpy/fft/__init__.py
+++ b/numpy/fft/__init__.py
@@ -1,9 +1,191 @@
-from __future__ import division, absolute_import, print_function
+"""
+Discrete Fourier Transform (:mod:`numpy.fft`)
+=============================================
+
+.. currentmodule:: numpy.fft
+
+Standard FFTs
+-------------
+
+.. autosummary::
+ :toctree: generated/
+
+ fft Discrete Fourier transform.
+ ifft Inverse discrete Fourier transform.
+ fft2 Discrete Fourier transform in two dimensions.
+ ifft2 Inverse discrete Fourier transform in two dimensions.
+ fftn Discrete Fourier transform in N-dimensions.
+ ifftn Inverse discrete Fourier transform in N dimensions.
+
+Real FFTs
+---------
+
+.. autosummary::
+ :toctree: generated/
+
+ rfft Real discrete Fourier transform.
+ irfft Inverse real discrete Fourier transform.
+ rfft2 Real discrete Fourier transform in two dimensions.
+ irfft2 Inverse real discrete Fourier transform in two dimensions.
+ rfftn Real discrete Fourier transform in N dimensions.
+ irfftn Inverse real discrete Fourier transform in N dimensions.
+
+Hermitian FFTs
+--------------
+
+.. autosummary::
+ :toctree: generated/
+
+ hfft Hermitian discrete Fourier transform.
+ ihfft Inverse Hermitian discrete Fourier transform.
+
+Helper routines
+---------------
+
+.. autosummary::
+ :toctree: generated/
+
+ fftfreq Discrete Fourier Transform sample frequencies.
+ rfftfreq DFT sample frequencies (for usage with rfft, irfft).
+ fftshift Shift zero-frequency component to center of spectrum.
+ ifftshift Inverse of fftshift.
+
+
+Background information
+----------------------
+
+Fourier analysis is fundamentally a method for expressing a function as a
+sum of periodic components, and for recovering the function from those
+components. When both the function and its Fourier transform are
+replaced with discretized counterparts, it is called the discrete Fourier
+transform (DFT). The DFT has become a mainstay of numerical computing in
+part because of a very fast algorithm for computing it, called the Fast
+Fourier Transform (FFT), which was known to Gauss (1805) and was brought
+to light in its current form by Cooley and Tukey [CT]_. Press et al. [NR]_
+provide an accessible introduction to Fourier analysis and its
+applications.
+
+Because the discrete Fourier transform separates its input into
+components that contribute at discrete frequencies, it has a great number
+of applications in digital signal processing, e.g., for filtering, and in
+this context the discretized input to the transform is customarily
+referred to as a *signal*, which exists in the *time domain*. The output
+is called a *spectrum* or *transform* and exists in the *frequency
+domain*.
+
+Implementation details
+----------------------
+
+There are many ways to define the DFT, varying in the sign of the
+exponent, normalization, etc. In this implementation, the DFT is defined
+as
+
+.. math::
+ A_k = \\sum_{m=0}^{n-1} a_m \\exp\\left\\{-2\\pi i{mk \\over n}\\right\\}
+ \\qquad k = 0,\\ldots,n-1.
+
+The DFT is in general defined for complex inputs and outputs, and a
+single-frequency component at linear frequency :math:`f` is
+represented by a complex exponential
+:math:`a_m = \\exp\\{2\\pi i\\,f m\\Delta t\\}`, where :math:`\\Delta t`
+is the sampling interval.
-# To get sub-modules
-from .info import __doc__
+The values in the result follow so-called "standard" order: If ``A =
+fft(a, n)``, then ``A[0]`` contains the zero-frequency term (the sum of
+the signal), which is always purely real for real inputs. Then ``A[1:n/2]``
+contains the positive-frequency terms, and ``A[n/2+1:]`` contains the
+negative-frequency terms, in order of decreasingly negative frequency.
+For an even number of input points, ``A[n/2]`` represents both positive and
+negative Nyquist frequency, and is also purely real for real input. For
+an odd number of input points, ``A[(n-1)/2]`` contains the largest positive
+frequency, while ``A[(n+1)/2]`` contains the largest negative frequency.
+The routine ``np.fft.fftfreq(n)`` returns an array giving the frequencies
+of corresponding elements in the output. The routine
+``np.fft.fftshift(A)`` shifts transforms and their frequencies to put the
+zero-frequency components in the middle, and ``np.fft.ifftshift(A)`` undoes
+that shift.
+
+When the input `a` is a time-domain signal and ``A = fft(a)``, ``np.abs(A)``
+is its amplitude spectrum and ``np.abs(A)**2`` is its power spectrum.
+The phase spectrum is obtained by ``np.angle(A)``.
+
+The inverse DFT is defined as
+
+.. math::
+ a_m = \\frac{1}{n}\\sum_{k=0}^{n-1}A_k\\exp\\left\\{2\\pi i{mk\\over n}\\right\\}
+ \\qquad m = 0,\\ldots,n-1.
+
+It differs from the forward transform by the sign of the exponential
+argument and the default normalization by :math:`1/n`.
+
+Normalization
+-------------
+The default normalization has the direct transforms unscaled and the inverse
+transforms are scaled by :math:`1/n`. It is possible to obtain unitary
+transforms by setting the keyword argument ``norm`` to ``"ortho"`` (default is
+`None`) so that both direct and inverse transforms will be scaled by
+:math:`1/\\sqrt{n}`.
+
+Real and Hermitian transforms
+-----------------------------
+
+When the input is purely real, its transform is Hermitian, i.e., the
+component at frequency :math:`f_k` is the complex conjugate of the
+component at frequency :math:`-f_k`, which means that for real
+inputs there is no information in the negative frequency components that
+is not already available from the positive frequency components.
+The family of `rfft` functions is
+designed to operate on real inputs, and exploits this symmetry by
+computing only the positive frequency components, up to and including the
+Nyquist frequency. Thus, ``n`` input points produce ``n/2+1`` complex
+output points.  The inverses of this family assume the same symmetry of
+its input, and for an output of ``n`` points use ``n/2+1`` input points.
+
+Correspondingly, when the spectrum is purely real, the signal is
+Hermitian. The `hfft` family of functions exploits this symmetry by
+using ``n/2+1`` complex points in the input (time) domain for ``n`` real
+points in the frequency domain.
+
+In higher dimensions, FFTs are used, e.g., for image analysis and
+filtering. The computational efficiency of the FFT means that it can
+also be a faster way to compute large convolutions, using the property
+that a convolution in the time domain is equivalent to a point-by-point
+multiplication in the frequency domain.
+
+Higher dimensions
+-----------------
+
+In two dimensions, the DFT is defined as
+
+.. math::
+ A_{kl} = \\sum_{m=0}^{M-1} \\sum_{n=0}^{N-1}
+ a_{mn}\\exp\\left\\{-2\\pi i \\left({mk\\over M}+{nl\\over N}\\right)\\right\\}
+ \\qquad k = 0, \\ldots, M-1;\\quad l = 0, \\ldots, N-1,
+
+which extends in the obvious way to higher dimensions, and the inverses
+in higher dimensions also extend in the same way.
+
+References
+----------
+
+.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
+ machine calculation of complex Fourier series," *Math. Comput.*
+ 19: 297-301.
+
+.. [NR] Press, W., Teukolsky, S., Vetterline, W.T., and Flannery, B.P.,
+ 2007, *Numerical Recipes: The Art of Scientific Computing*, ch.
+ 12-13. Cambridge Univ. Press, Cambridge, UK.
+
+Examples
+--------
+
+For examples, see the various functions.
+
+"""
+
+from __future__ import division, absolute_import, print_function
-from .pocketfft import *
+from ._pocketfft import *
from .helper import *
from numpy._pytesttester import PytestTester
diff --git a/numpy/fft/pocketfft.c b/numpy/fft/_pocketfft.c
index 9d1218e6b..d75b9983c 100644
--- a/numpy/fft/pocketfft.c
+++ b/numpy/fft/_pocketfft.c
@@ -2362,7 +2362,7 @@ static struct PyMethodDef methods[] = {
#if PY_MAJOR_VERSION >= 3
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
- "pocketfft_internal",
+ "_pocketfft_internal",
NULL,
-1,
methods,
@@ -2376,11 +2376,11 @@ static struct PyModuleDef moduledef = {
/* Initialization function for the module */
#if PY_MAJOR_VERSION >= 3
#define RETVAL(x) x
-PyMODINIT_FUNC PyInit_pocketfft_internal(void)
+PyMODINIT_FUNC PyInit__pocketfft_internal(void)
#else
#define RETVAL(x)
PyMODINIT_FUNC
-initpocketfft_internal(void)
+init_pocketfft_internal(void)
#endif
{
PyObject *m;
@@ -2389,7 +2389,7 @@ initpocketfft_internal(void)
#else
static const char module_documentation[] = "";
- m = Py_InitModule4("pocketfft_internal", methods,
+ m = Py_InitModule4("_pocketfft_internal", methods,
module_documentation,
(PyObject*)NULL,PYTHON_API_VERSION);
#endif
diff --git a/numpy/fft/pocketfft.py b/numpy/fft/_pocketfft.py
index b7f6f1434..50720cda4 100644
--- a/numpy/fft/pocketfft.py
+++ b/numpy/fft/_pocketfft.py
@@ -35,7 +35,7 @@ __all__ = ['fft', 'ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn',
import functools
from numpy.core import asarray, zeros, swapaxes, conjugate, take, sqrt
-from . import pocketfft_internal as pfi
+from . import _pocketfft_internal as pfi
from numpy.core.multiarray import normalize_axis_index
from numpy.core import overrides
@@ -44,7 +44,11 @@ array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy.fft')
-def _raw_fft(a, n, axis, is_real, is_forward, fct):
+# `inv_norm` is a float by which the result of the transform needs to be
+# divided. This replaces the original, more intuitive `fct` parameter to avoid
+# divisions by zero (or alternatively additional checks) in the case of
+# zero-length axes during its computation.
+def _raw_fft(a, n, axis, is_real, is_forward, inv_norm):
axis = normalize_axis_index(axis, a.ndim)
if n is None:
n = a.shape[axis]
@@ -53,6 +57,8 @@ def _raw_fft(a, n, axis, is_real, is_forward, fct):
raise ValueError("Invalid number of FFT data points (%d) specified."
% n)
+ fct = 1/inv_norm
+
if a.shape[axis] != n:
s = list(a.shape)
if s[axis] > n:
@@ -176,10 +182,10 @@ def fft(a, n=None, axis=-1, norm=None):
a = asarray(a)
if n is None:
n = a.shape[axis]
- fct = 1
+ inv_norm = 1
if norm is not None and _unitary(norm):
- fct = 1 / sqrt(n)
- output = _raw_fft(a, n, axis, False, True, fct)
+ inv_norm = sqrt(n)
+ output = _raw_fft(a, n, axis, False, True, inv_norm)
return output
@@ -271,10 +277,11 @@ def ifft(a, n=None, axis=-1, norm=None):
a = asarray(a)
if n is None:
n = a.shape[axis]
- fct = 1/n
if norm is not None and _unitary(norm):
- fct = 1/sqrt(n)
- output = _raw_fft(a, n, axis, False, False, fct)
+ inv_norm = sqrt(max(n, 1))
+ else:
+ inv_norm = n
+ output = _raw_fft(a, n, axis, False, False, inv_norm)
return output
@@ -359,12 +366,12 @@ def rfft(a, n=None, axis=-1, norm=None):
"""
a = asarray(a)
- fct = 1
+ inv_norm = 1
if norm is not None and _unitary(norm):
if n is None:
n = a.shape[axis]
- fct = 1/sqrt(n)
- output = _raw_fft(a, n, axis, True, True, fct)
+ inv_norm = sqrt(n)
+ output = _raw_fft(a, n, axis, True, True, inv_norm)
return output
@@ -461,10 +468,10 @@ def irfft(a, n=None, axis=-1, norm=None):
a = asarray(a)
if n is None:
n = (a.shape[axis] - 1) * 2
- fct = 1/n
+ inv_norm = n
if norm is not None and _unitary(norm):
- fct = 1/sqrt(n)
- output = _raw_fft(a, n, axis, True, False, fct)
+ inv_norm = sqrt(n)
+ output = _raw_fft(a, n, axis, True, False, inv_norm)
return output
diff --git a/numpy/fft/info.py b/numpy/fft/info.py
deleted file mode 100644
index cb6526b44..000000000
--- a/numpy/fft/info.py
+++ /dev/null
@@ -1,187 +0,0 @@
-"""
-Discrete Fourier Transform (:mod:`numpy.fft`)
-=============================================
-
-.. currentmodule:: numpy.fft
-
-Standard FFTs
--------------
-
-.. autosummary::
- :toctree: generated/
-
- fft Discrete Fourier transform.
- ifft Inverse discrete Fourier transform.
- fft2 Discrete Fourier transform in two dimensions.
- ifft2 Inverse discrete Fourier transform in two dimensions.
- fftn Discrete Fourier transform in N-dimensions.
- ifftn Inverse discrete Fourier transform in N dimensions.
-
-Real FFTs
----------
-
-.. autosummary::
- :toctree: generated/
-
- rfft Real discrete Fourier transform.
- irfft Inverse real discrete Fourier transform.
- rfft2 Real discrete Fourier transform in two dimensions.
- irfft2 Inverse real discrete Fourier transform in two dimensions.
- rfftn Real discrete Fourier transform in N dimensions.
- irfftn Inverse real discrete Fourier transform in N dimensions.
-
-Hermitian FFTs
---------------
-
-.. autosummary::
- :toctree: generated/
-
- hfft Hermitian discrete Fourier transform.
- ihfft Inverse Hermitian discrete Fourier transform.
-
-Helper routines
----------------
-
-.. autosummary::
- :toctree: generated/
-
- fftfreq Discrete Fourier Transform sample frequencies.
- rfftfreq DFT sample frequencies (for usage with rfft, irfft).
- fftshift Shift zero-frequency component to center of spectrum.
- ifftshift Inverse of fftshift.
-
-
-Background information
-----------------------
-
-Fourier analysis is fundamentally a method for expressing a function as a
-sum of periodic components, and for recovering the function from those
-components. When both the function and its Fourier transform are
-replaced with discretized counterparts, it is called the discrete Fourier
-transform (DFT). The DFT has become a mainstay of numerical computing in
-part because of a very fast algorithm for computing it, called the Fast
-Fourier Transform (FFT), which was known to Gauss (1805) and was brought
-to light in its current form by Cooley and Tukey [CT]_. Press et al. [NR]_
-provide an accessible introduction to Fourier analysis and its
-applications.
-
-Because the discrete Fourier transform separates its input into
-components that contribute at discrete frequencies, it has a great number
-of applications in digital signal processing, e.g., for filtering, and in
-this context the discretized input to the transform is customarily
-referred to as a *signal*, which exists in the *time domain*. The output
-is called a *spectrum* or *transform* and exists in the *frequency
-domain*.
-
-Implementation details
-----------------------
-
-There are many ways to define the DFT, varying in the sign of the
-exponent, normalization, etc. In this implementation, the DFT is defined
-as
-
-.. math::
- A_k = \\sum_{m=0}^{n-1} a_m \\exp\\left\\{-2\\pi i{mk \\over n}\\right\\}
- \\qquad k = 0,\\ldots,n-1.
-
-The DFT is in general defined for complex inputs and outputs, and a
-single-frequency component at linear frequency :math:`f` is
-represented by a complex exponential
-:math:`a_m = \\exp\\{2\\pi i\\,f m\\Delta t\\}`, where :math:`\\Delta t`
-is the sampling interval.
-
-The values in the result follow so-called "standard" order: If ``A =
-fft(a, n)``, then ``A[0]`` contains the zero-frequency term (the sum of
-the signal), which is always purely real for real inputs. Then ``A[1:n/2]``
-contains the positive-frequency terms, and ``A[n/2+1:]`` contains the
-negative-frequency terms, in order of decreasingly negative frequency.
-For an even number of input points, ``A[n/2]`` represents both positive and
-negative Nyquist frequency, and is also purely real for real input. For
-an odd number of input points, ``A[(n-1)/2]`` contains the largest positive
-frequency, while ``A[(n+1)/2]`` contains the largest negative frequency.
-The routine ``np.fft.fftfreq(n)`` returns an array giving the frequencies
-of corresponding elements in the output. The routine
-``np.fft.fftshift(A)`` shifts transforms and their frequencies to put the
-zero-frequency components in the middle, and ``np.fft.ifftshift(A)`` undoes
-that shift.
-
-When the input `a` is a time-domain signal and ``A = fft(a)``, ``np.abs(A)``
-is its amplitude spectrum and ``np.abs(A)**2`` is its power spectrum.
-The phase spectrum is obtained by ``np.angle(A)``.
-
-The inverse DFT is defined as
-
-.. math::
- a_m = \\frac{1}{n}\\sum_{k=0}^{n-1}A_k\\exp\\left\\{2\\pi i{mk\\over n}\\right\\}
- \\qquad m = 0,\\ldots,n-1.
-
-It differs from the forward transform by the sign of the exponential
-argument and the default normalization by :math:`1/n`.
-
-Normalization
--------------
-The default normalization has the direct transforms unscaled and the inverse
-transforms are scaled by :math:`1/n`. It is possible to obtain unitary
-transforms by setting the keyword argument ``norm`` to ``"ortho"`` (default is
-`None`) so that both direct and inverse transforms will be scaled by
-:math:`1/\\sqrt{n}`.
-
-Real and Hermitian transforms
------------------------------
-
-When the input is purely real, its transform is Hermitian, i.e., the
-component at frequency :math:`f_k` is the complex conjugate of the
-component at frequency :math:`-f_k`, which means that for real
-inputs there is no information in the negative frequency components that
-is not already available from the positive frequency components.
-The family of `rfft` functions is
-designed to operate on real inputs, and exploits this symmetry by
-computing only the positive frequency components, up to and including the
-Nyquist frequency. Thus, ``n`` input points produce ``n/2+1`` complex
-output points. The inverses of this family assumes the same symmetry of
-its input, and for an output of ``n`` points uses ``n/2+1`` input points.
-
-Correspondingly, when the spectrum is purely real, the signal is
-Hermitian. The `hfft` family of functions exploits this symmetry by
-using ``n/2+1`` complex points in the input (time) domain for ``n`` real
-points in the frequency domain.
-
-In higher dimensions, FFTs are used, e.g., for image analysis and
-filtering. The computational efficiency of the FFT means that it can
-also be a faster way to compute large convolutions, using the property
-that a convolution in the time domain is equivalent to a point-by-point
-multiplication in the frequency domain.
-
-Higher dimensions
------------------
-
-In two dimensions, the DFT is defined as
-
-.. math::
- A_{kl} = \\sum_{m=0}^{M-1} \\sum_{n=0}^{N-1}
- a_{mn}\\exp\\left\\{-2\\pi i \\left({mk\\over M}+{nl\\over N}\\right)\\right\\}
- \\qquad k = 0, \\ldots, M-1;\\quad l = 0, \\ldots, N-1,
-
-which extends in the obvious way to higher dimensions, and the inverses
-in higher dimensions also extend in the same way.
-
-References
-----------
-
-.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
- machine calculation of complex Fourier series," *Math. Comput.*
- 19: 297-301.
-
-.. [NR] Press, W., Teukolsky, S., Vetterline, W.T., and Flannery, B.P.,
- 2007, *Numerical Recipes: The Art of Scientific Computing*, ch.
- 12-13. Cambridge Univ. Press, Cambridge, UK.
-
-Examples
---------
-
-For examples, see the various functions.
-
-"""
-from __future__ import division, absolute_import, print_function
-
-depends = ['core']
diff --git a/numpy/fft/setup.py b/numpy/fft/setup.py
index 6c3548b65..8c3a31557 100644
--- a/numpy/fft/setup.py
+++ b/numpy/fft/setup.py
@@ -8,8 +8,8 @@ def configuration(parent_package='',top_path=None):
config.add_data_dir('tests')
# Configure pocketfft_internal
- config.add_extension('pocketfft_internal',
- sources=['pocketfft.c']
+ config.add_extension('_pocketfft_internal',
+ sources=['_pocketfft.c']
)
return config
diff --git a/numpy/fft/tests/test_pocketfft.py b/numpy/fft/tests/test_pocketfft.py
index db185cb21..453e964fa 100644
--- a/numpy/fft/tests/test_pocketfft.py
+++ b/numpy/fft/tests/test_pocketfft.py
@@ -4,7 +4,7 @@ import numpy as np
import pytest
from numpy.random import random
from numpy.testing import (
- assert_array_almost_equal, assert_array_equal, assert_raises,
+ assert_array_equal, assert_raises, assert_allclose
)
import threading
import sys
@@ -34,109 +34,115 @@ class TestFFT1D(object):
x = random(maxlen) + 1j*random(maxlen)
xr = random(maxlen)
for i in range(1,maxlen):
- assert_array_almost_equal(np.fft.ifft(np.fft.fft(x[0:i])), x[0:i],
- decimal=12)
- assert_array_almost_equal(np.fft.irfft(np.fft.rfft(xr[0:i]),i),
- xr[0:i], decimal=12)
+ assert_allclose(np.fft.ifft(np.fft.fft(x[0:i])), x[0:i],
+ atol=1e-12)
+ assert_allclose(np.fft.irfft(np.fft.rfft(xr[0:i]),i),
+ xr[0:i], atol=1e-12)
def test_fft(self):
x = random(30) + 1j*random(30)
- assert_array_almost_equal(fft1(x), np.fft.fft(x))
- assert_array_almost_equal(fft1(x) / np.sqrt(30),
- np.fft.fft(x, norm="ortho"))
+ assert_allclose(fft1(x), np.fft.fft(x), atol=1e-6)
+ assert_allclose(fft1(x) / np.sqrt(30),
+ np.fft.fft(x, norm="ortho"), atol=1e-6)
- def test_ifft(self):
+ @pytest.mark.parametrize('norm', (None, 'ortho'))
+ def test_ifft(self, norm):
x = random(30) + 1j*random(30)
- assert_array_almost_equal(x, np.fft.ifft(np.fft.fft(x)))
- assert_array_almost_equal(
- x, np.fft.ifft(np.fft.fft(x, norm="ortho"), norm="ortho"))
+ assert_allclose(
+ x, np.fft.ifft(np.fft.fft(x, norm=norm), norm=norm),
+ atol=1e-6)
+ # Ensure we get the correct error message
+ with pytest.raises(ValueError,
+ match='Invalid number of FFT data points'):
+ np.fft.ifft([], norm=norm)
def test_fft2(self):
x = random((30, 20)) + 1j*random((30, 20))
- assert_array_almost_equal(np.fft.fft(np.fft.fft(x, axis=1), axis=0),
- np.fft.fft2(x))
- assert_array_almost_equal(np.fft.fft2(x) / np.sqrt(30 * 20),
- np.fft.fft2(x, norm="ortho"))
+ assert_allclose(np.fft.fft(np.fft.fft(x, axis=1), axis=0),
+ np.fft.fft2(x), atol=1e-6)
+ assert_allclose(np.fft.fft2(x) / np.sqrt(30 * 20),
+ np.fft.fft2(x, norm="ortho"), atol=1e-6)
def test_ifft2(self):
x = random((30, 20)) + 1j*random((30, 20))
- assert_array_almost_equal(np.fft.ifft(np.fft.ifft(x, axis=1), axis=0),
- np.fft.ifft2(x))
- assert_array_almost_equal(np.fft.ifft2(x) * np.sqrt(30 * 20),
- np.fft.ifft2(x, norm="ortho"))
+ assert_allclose(np.fft.ifft(np.fft.ifft(x, axis=1), axis=0),
+ np.fft.ifft2(x), atol=1e-6)
+ assert_allclose(np.fft.ifft2(x) * np.sqrt(30 * 20),
+ np.fft.ifft2(x, norm="ortho"), atol=1e-6)
def test_fftn(self):
x = random((30, 20, 10)) + 1j*random((30, 20, 10))
- assert_array_almost_equal(
+ assert_allclose(
np.fft.fft(np.fft.fft(np.fft.fft(x, axis=2), axis=1), axis=0),
- np.fft.fftn(x))
- assert_array_almost_equal(np.fft.fftn(x) / np.sqrt(30 * 20 * 10),
- np.fft.fftn(x, norm="ortho"))
+ np.fft.fftn(x), atol=1e-6)
+ assert_allclose(np.fft.fftn(x) / np.sqrt(30 * 20 * 10),
+ np.fft.fftn(x, norm="ortho"), atol=1e-6)
def test_ifftn(self):
x = random((30, 20, 10)) + 1j*random((30, 20, 10))
- assert_array_almost_equal(
+ assert_allclose(
np.fft.ifft(np.fft.ifft(np.fft.ifft(x, axis=2), axis=1), axis=0),
- np.fft.ifftn(x))
- assert_array_almost_equal(np.fft.ifftn(x) * np.sqrt(30 * 20 * 10),
- np.fft.ifftn(x, norm="ortho"))
+ np.fft.ifftn(x), atol=1e-6)
+ assert_allclose(np.fft.ifftn(x) * np.sqrt(30 * 20 * 10),
+ np.fft.ifftn(x, norm="ortho"), atol=1e-6)
def test_rfft(self):
x = random(30)
for n in [x.size, 2*x.size]:
for norm in [None, 'ortho']:
- assert_array_almost_equal(
+ assert_allclose(
np.fft.fft(x, n=n, norm=norm)[:(n//2 + 1)],
- np.fft.rfft(x, n=n, norm=norm))
- assert_array_almost_equal(np.fft.rfft(x, n=n) / np.sqrt(n),
- np.fft.rfft(x, n=n, norm="ortho"))
+ np.fft.rfft(x, n=n, norm=norm), atol=1e-6)
+ assert_allclose(
+ np.fft.rfft(x, n=n) / np.sqrt(n),
+ np.fft.rfft(x, n=n, norm="ortho"), atol=1e-6)
def test_irfft(self):
x = random(30)
- assert_array_almost_equal(x, np.fft.irfft(np.fft.rfft(x)))
- assert_array_almost_equal(
- x, np.fft.irfft(np.fft.rfft(x, norm="ortho"), norm="ortho"))
+ assert_allclose(x, np.fft.irfft(np.fft.rfft(x)), atol=1e-6)
+ assert_allclose(
+ x, np.fft.irfft(np.fft.rfft(x, norm="ortho"), norm="ortho"), atol=1e-6)
def test_rfft2(self):
x = random((30, 20))
- assert_array_almost_equal(np.fft.fft2(x)[:, :11], np.fft.rfft2(x))
- assert_array_almost_equal(np.fft.rfft2(x) / np.sqrt(30 * 20),
- np.fft.rfft2(x, norm="ortho"))
+ assert_allclose(np.fft.fft2(x)[:, :11], np.fft.rfft2(x), atol=1e-6)
+ assert_allclose(np.fft.rfft2(x) / np.sqrt(30 * 20),
+ np.fft.rfft2(x, norm="ortho"), atol=1e-6)
def test_irfft2(self):
x = random((30, 20))
- assert_array_almost_equal(x, np.fft.irfft2(np.fft.rfft2(x)))
- assert_array_almost_equal(
- x, np.fft.irfft2(np.fft.rfft2(x, norm="ortho"), norm="ortho"))
+ assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x)), atol=1e-6)
+ assert_allclose(
+ x, np.fft.irfft2(np.fft.rfft2(x, norm="ortho"), norm="ortho"), atol=1e-6)
def test_rfftn(self):
x = random((30, 20, 10))
- assert_array_almost_equal(np.fft.fftn(x)[:, :, :6], np.fft.rfftn(x))
- assert_array_almost_equal(np.fft.rfftn(x) / np.sqrt(30 * 20 * 10),
- np.fft.rfftn(x, norm="ortho"))
+ assert_allclose(np.fft.fftn(x)[:, :, :6], np.fft.rfftn(x), atol=1e-6)
+ assert_allclose(np.fft.rfftn(x) / np.sqrt(30 * 20 * 10),
+ np.fft.rfftn(x, norm="ortho"), atol=1e-6)
def test_irfftn(self):
x = random((30, 20, 10))
- assert_array_almost_equal(x, np.fft.irfftn(np.fft.rfftn(x)))
- assert_array_almost_equal(
- x, np.fft.irfftn(np.fft.rfftn(x, norm="ortho"), norm="ortho"))
+ assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x)), atol=1e-6)
+ assert_allclose(
+ x, np.fft.irfftn(np.fft.rfftn(x, norm="ortho"), norm="ortho"), atol=1e-6)
def test_hfft(self):
x = random(14) + 1j*random(14)
x_herm = np.concatenate((random(1), x, random(1)))
x = np.concatenate((x_herm, x[::-1].conj()))
- assert_array_almost_equal(np.fft.fft(x), np.fft.hfft(x_herm))
- assert_array_almost_equal(np.fft.hfft(x_herm) / np.sqrt(30),
- np.fft.hfft(x_herm, norm="ortho"))
+ assert_allclose(np.fft.fft(x), np.fft.hfft(x_herm), atol=1e-6)
+ assert_allclose(np.fft.hfft(x_herm) / np.sqrt(30),
+ np.fft.hfft(x_herm, norm="ortho"), atol=1e-6)
def test_ihttf(self):
x = random(14) + 1j*random(14)
x_herm = np.concatenate((random(1), x, random(1)))
x = np.concatenate((x_herm, x[::-1].conj()))
- assert_array_almost_equal(x_herm, np.fft.ihfft(np.fft.hfft(x_herm)))
- assert_array_almost_equal(
+ assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm)), atol=1e-6)
+ assert_allclose(
x_herm, np.fft.ihfft(np.fft.hfft(x_herm, norm="ortho"),
- norm="ortho"))
+ norm="ortho"), atol=1e-6)
@pytest.mark.parametrize("op", [np.fft.fftn, np.fft.ifftn,
np.fft.rfftn, np.fft.irfftn])
@@ -146,7 +152,7 @@ class TestFFT1D(object):
for a in axes:
op_tr = op(np.transpose(x, a))
tr_op = np.transpose(op(x, axes=a), a)
- assert_array_almost_equal(op_tr, tr_op)
+ assert_allclose(op_tr, tr_op, atol=1e-6)
def test_all_1d_norm_preserving(self):
# verify that round-trip transforms are norm-preserving
@@ -164,8 +170,8 @@ class TestFFT1D(object):
for norm in [None, 'ortho']:
tmp = forw(x, n=n, norm=norm)
tmp = back(tmp, n=n, norm=norm)
- assert_array_almost_equal(x_norm,
- np.linalg.norm(tmp))
+ assert_allclose(x_norm,
+ np.linalg.norm(tmp), atol=1e-6)
@pytest.mark.parametrize("dtype", [np.half, np.single, np.double,
np.longdouble])
@@ -173,8 +179,8 @@ class TestFFT1D(object):
# make sure that all input precisions are accepted and internally
# converted to 64bit
x = random(30).astype(dtype)
- assert_array_almost_equal(np.fft.ifft(np.fft.fft(x)), x)
- assert_array_almost_equal(np.fft.irfft(np.fft.rfft(x)), x)
+ assert_allclose(np.fft.ifft(np.fft.fft(x)), x, atol=1e-6)
+ assert_allclose(np.fft.irfft(np.fft.rfft(x)), x, atol=1e-6)
@pytest.mark.parametrize(
@@ -190,6 +196,8 @@ def test_fft_with_order(dtype, order, fft):
# non contiguous arrays
rng = np.random.RandomState(42)
X = rng.rand(8, 7, 13).astype(dtype, copy=False)
+ # See discussion in pull/14178
+ _tol = 8.0 * np.sqrt(np.log2(X.size)) * np.finfo(X.dtype).eps
if order == 'F':
Y = np.asfortranarray(X)
else:
@@ -201,7 +209,7 @@ def test_fft_with_order(dtype, order, fft):
for axis in range(3):
X_res = fft(X, axis=axis)
Y_res = fft(Y, axis=axis)
- assert_array_almost_equal(X_res, Y_res)
+ assert_allclose(X_res, Y_res, atol=_tol, rtol=_tol)
elif fft.__name__.endswith(('fft2', 'fftn')):
axes = [(0, 1), (1, 2), (0, 2)]
if fft.__name__.endswith('fftn'):
@@ -209,9 +217,9 @@ def test_fft_with_order(dtype, order, fft):
for ax in axes:
X_res = fft(X, axes=ax)
Y_res = fft(Y, axes=ax)
- assert_array_almost_equal(X_res, Y_res)
+ assert_allclose(X_res, Y_res, atol=_tol, rtol=_tol)
else:
- raise ValueError
+ raise ValueError()
class TestFFTThreadSafe(object):
diff --git a/numpy/lib/__init__.py b/numpy/lib/__init__.py
index c1757150e..2db12d9a4 100644
--- a/numpy/lib/__init__.py
+++ b/numpy/lib/__init__.py
@@ -1,14 +1,31 @@
+"""
+**Note:** almost all functions in the ``numpy.lib`` namespace
+are also present in the main ``numpy`` namespace. Please use the
+functions as ``np.<funcname>`` where possible.
+
+``numpy.lib`` is mostly a space for implementing functions that don't
+belong in core or in another NumPy submodule with a clear purpose
+(e.g. ``random``, ``fft``, ``linalg``, ``ma``).
+
+It mostly contains basic functions that are used by several submodules and are
+useful to have in the main name-space.
+
+"""
from __future__ import division, absolute_import, print_function
import math
-from .info import __doc__
from numpy.version import version as __version__
+# Public submodules
+# Note: recfunctions and (maybe) format are public too, but not imported
+from . import mixins
+from . import scimath as emath
+
+# Private submodules
from .type_check import *
from .index_tricks import *
from .function_base import *
-from .mixins import *
from .nanfunctions import *
from .shape_base import *
from .stride_tricks import *
@@ -16,9 +33,7 @@ from .twodim_base import *
from .ufunclike import *
from .histograms import *
-from . import scimath as emath
from .polynomial import *
-#import convertcode
from .utils import *
from .arraysetops import *
from .npyio import *
@@ -28,11 +43,10 @@ from .arraypad import *
from ._version import *
from numpy.core._multiarray_umath import tracemalloc_domain
-__all__ = ['emath', 'math', 'tracemalloc_domain']
+__all__ = ['emath', 'math', 'tracemalloc_domain', 'Arrayterator']
__all__ += type_check.__all__
__all__ += index_tricks.__all__
__all__ += function_base.__all__
-__all__ += mixins.__all__
__all__ += shape_base.__all__
__all__ += stride_tricks.__all__
__all__ += twodim_base.__all__
diff --git a/numpy/lib/_iotools.py b/numpy/lib/_iotools.py
index 0ebd39b8c..c392929fd 100644
--- a/numpy/lib/_iotools.py
+++ b/numpy/lib/_iotools.py
@@ -121,7 +121,7 @@ def has_nested_fields(ndtype):
"""
for name in ndtype.names or ():
- if ndtype[name].names:
+ if ndtype[name].names is not None:
return True
return False
@@ -931,28 +931,27 @@ def easy_dtype(ndtype, names=None, defaultfmt="f%i", **validationargs):
names = validate(names, nbfields=nbfields, defaultfmt=defaultfmt)
ndtype = np.dtype(dict(formats=ndtype, names=names))
else:
- nbtypes = len(ndtype)
# Explicit names
if names is not None:
validate = NameValidator(**validationargs)
if isinstance(names, basestring):
names = names.split(",")
# Simple dtype: repeat to match the nb of names
- if nbtypes == 0:
+ if ndtype.names is None:
formats = tuple([ndtype.type] * len(names))
names = validate(names, defaultfmt=defaultfmt)
ndtype = np.dtype(list(zip(names, formats)))
# Structured dtype: just validate the names as needed
else:
- ndtype.names = validate(names, nbfields=nbtypes,
+ ndtype.names = validate(names, nbfields=len(ndtype.names),
defaultfmt=defaultfmt)
# No implicit names
- elif (nbtypes > 0):
+ elif ndtype.names is not None:
validate = NameValidator(**validationargs)
# Default initial names : should we change the format ?
- if ((ndtype.names == tuple("f%i" % i for i in range(nbtypes))) and
+ if ((ndtype.names == tuple("f%i" % i for i in range(len(ndtype.names)))) and
(defaultfmt != "f%i")):
- ndtype.names = validate([''] * nbtypes, defaultfmt=defaultfmt)
+ ndtype.names = validate([''] * len(ndtype.names), defaultfmt=defaultfmt)
# Explicit initial names : just validate
else:
ndtype.names = validate(ndtype.names, defaultfmt=defaultfmt)
diff --git a/numpy/lib/arraypad.py b/numpy/lib/arraypad.py
index f08d425d6..33e64708d 100644
--- a/numpy/lib/arraypad.py
+++ b/numpy/lib/arraypad.py
@@ -17,66 +17,6 @@ __all__ = ['pad']
# Private utility functions.
-def _linear_ramp(ndim, axis, start, stop, size, reverse=False):
- """
- Create a linear ramp of `size` in `axis` with `ndim`.
-
- This algorithm behaves like a vectorized version of `numpy.linspace`.
- The resulting linear ramp is broadcastable to any array that matches the
- ramp in `shape[axis]` and `ndim`.
-
- Parameters
- ----------
- ndim : int
- Number of dimensions of the resulting array. All dimensions except
- the one specified by `axis` will have the size 1.
- axis : int
- The dimension that contains the linear ramp of `size`.
- start : int or ndarray
- The starting value(s) of the linear ramp. If given as an array, its
- size must match `size`.
- stop : int or ndarray
- The stop value(s) (not included!) of the linear ramp. If given as an
- array, its size must match `size`.
- size : int
- The number of elements in the linear ramp. If this argument is 0 the
- dimensions of `ramp` will all be of length 1 except for the one given
- by `axis` which will be 0.
- reverse : bool
- If False, increment in a positive fashion, otherwise decrement.
-
- Returns
- -------
- ramp : ndarray
- Output array of dtype np.float64 that in- or decrements along the given
- `axis`.
-
- Examples
- --------
- >>> _linear_ramp(ndim=2, axis=0, start=np.arange(3), stop=10, size=2)
- array([[0. , 1. , 2. ],
- [5. , 5.5, 6. ]])
- >>> _linear_ramp(ndim=3, axis=0, start=2, stop=0, size=0)
- array([], shape=(0, 1, 1), dtype=float64)
- """
- # Create initial ramp
- ramp = np.arange(size, dtype=np.float64)
- if reverse:
- ramp = ramp[::-1]
-
- # Make sure, that ramp is broadcastable
- init_shape = (1,) * axis + (size,) + (1,) * (ndim - axis - 1)
- ramp = ramp.reshape(init_shape)
-
- if size != 0:
- # And scale to given start and stop values
- gain = (stop - start) / float(size)
- ramp = ramp * gain
- ramp += start
-
- return ramp
-
-
def _round_if_needed(arr, dtype):
"""
Rounds arr inplace if destination dtype is integer.
@@ -269,17 +209,25 @@ def _get_linear_ramps(padded, axis, width_pair, end_value_pair):
"""
edge_pair = _get_edges(padded, axis, width_pair)
- left_ramp = _linear_ramp(
- padded.ndim, axis, start=end_value_pair[0], stop=edge_pair[0],
- size=width_pair[0], reverse=False
+ left_ramp = np.linspace(
+ start=end_value_pair[0],
+ stop=edge_pair[0].squeeze(axis), # Dimension is replaced by linspace
+ num=width_pair[0],
+ endpoint=False,
+ dtype=padded.dtype,
+ axis=axis,
)
- _round_if_needed(left_ramp, padded.dtype)
- right_ramp = _linear_ramp(
- padded.ndim, axis, start=end_value_pair[1], stop=edge_pair[1],
- size=width_pair[1], reverse=True
+ right_ramp = np.linspace(
+ start=end_value_pair[1],
+ stop=edge_pair[1].squeeze(axis), # Dimension is replaced by linspace
+ num=width_pair[1],
+ endpoint=False,
+ dtype=padded.dtype,
+ axis=axis,
)
- _round_if_needed(right_ramp, padded.dtype)
+ # Reverse linear space in appropriate dimension
+ right_ramp = right_ramp[_slice_at_axis(slice(None, None, -1), axis)]
return left_ramp, right_ramp
@@ -323,6 +271,12 @@ def _get_stats(padded, axis, width_pair, length_pair, stat_func):
if right_length is None or max_length < right_length:
right_length = max_length
+ if (left_length == 0 or right_length == 0) \
+ and stat_func in {np.amax, np.amin}:
+ # amax and amin can't operate on an empty array,
+ # raise a more descriptive warning here instead of the default one
+ raise ValueError("stat_length of 0 yields no value for padding")
+
# Calculate statistic for the left side
left_slice = _slice_at_axis(
slice(left_index, left_index + left_length), axis)
@@ -340,6 +294,7 @@ def _get_stats(padded, axis, width_pair, length_pair, stat_func):
right_chunk = padded[right_slice]
right_stat = stat_func(right_chunk, axis=axis, keepdims=True)
_round_if_needed(right_stat, padded.dtype)
+
return left_stat, right_stat
@@ -835,7 +790,7 @@ def pad(array, pad_width, mode='constant', **kwargs):
raise ValueError("unsupported keyword arguments for mode '{}': {}"
.format(mode, unsupported_kwargs))
- stat_functions = {"maximum": np.max, "minimum": np.min,
+ stat_functions = {"maximum": np.amax, "minimum": np.amin,
"mean": np.mean, "median": np.median}
# Create array with final shape and original values
diff --git a/numpy/lib/arraysetops.py b/numpy/lib/arraysetops.py
index b53d8c03f..2309f7e42 100644
--- a/numpy/lib/arraysetops.py
+++ b/numpy/lib/arraysetops.py
@@ -213,6 +213,7 @@ def unique(ar, return_index=False, return_inverse=False,
-----
When an axis is specified the subarrays indexed by the axis are sorted.
This is done by making the specified axis the first dimension of the array
+ (move the axis to the first dimension to keep the order of the other axes)
and then flattening the subarrays in C order. The flattened subarrays are
then viewed as a structured type with each element given a label, with the
effect that we end up with a 1-D array of structured types that can be
@@ -264,7 +265,7 @@ def unique(ar, return_index=False, return_inverse=False,
# axis was specified and not None
try:
- ar = np.swapaxes(ar, axis, 0)
+ ar = np.moveaxis(ar, axis, 0)
except np.AxisError:
# this removes the "axis1" or "axis2" prefix from the error message
raise np.AxisError(axis, ar.ndim)
@@ -285,7 +286,7 @@ def unique(ar, return_index=False, return_inverse=False,
def reshape_uniq(uniq):
uniq = uniq.view(orig_dtype)
uniq = uniq.reshape(-1, *orig_shape[1:])
- uniq = np.swapaxes(uniq, 0, axis)
+ uniq = np.moveaxis(uniq, 0, axis)
return uniq
output = _unique1d(consolidated, return_index,
@@ -383,6 +384,7 @@ def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
To return the indices of the values common to the input arrays
along with the intersected values:
+
>>> x = np.array([1, 1, 2, 3, 4])
>>> y = np.array([2, 1, 4, 6])
>>> xy, x_ind, y_ind = np.intersect1d(x, y, return_indices=True)
diff --git a/numpy/lib/financial.py b/numpy/lib/financial.py
index 216687475..d72384e99 100644
--- a/numpy/lib/financial.py
+++ b/numpy/lib/financial.py
@@ -715,8 +715,6 @@ def irr(values):
>>> round(np.irr([-5, 10.5, 1, -8, 1]), 5)
0.0886
- (Compare with the Example given for numpy.lib.financial.npv)
-
"""
# `np.roots` call is why this function does not support Decimal type.
#
@@ -763,6 +761,15 @@ def npv(rate, values):
The NPV of the input cash flow series `values` at the discount
`rate`.
+ Warnings
+ --------
+ ``npv`` considers a series of cashflows starting in the present (t = 0).
+ NPV can also be defined with a series of future cashflows, paid at the
+ end, rather than the start, of each period. If future cashflows are used,
+ the first cashflow `values[0]` must be zeroed and added to the net
+ present value of the future cashflows. This is demonstrated in the
+ examples.
+
Notes
-----
Returns the result of: [G]_
@@ -776,10 +783,24 @@ def npv(rate, values):
Examples
--------
- >>> np.npv(0.281,[-100, 39, 59, 55, 20])
- -0.0084785916384548798 # may vary
-
- (Compare with the Example given for numpy.lib.financial.irr)
+ Consider a potential project with an initial investment of $40 000 and
+ projected cashflows of $5 000, $8 000, $12 000 and $30 000 at the end of
+ each period discounted at a rate of 8% per period. To find the project's
+ net present value:
+
+ >>> rate, cashflows = 0.08, [-40_000, 5_000, 8_000, 12_000, 30_000]
+ >>> np.npv(rate, cashflows).round(5)
+ 3065.22267
+
+ It may be preferable to split the projected cashflow into an initial
+ investment and expected future cashflows. In this case, the value of
+ the initial cashflow is zero and the initial investment is later added
+ to the future cashflows net present value:
+
+ >>> initial_cashflow = cashflows[0]
+ >>> cashflows[0] = 0
+ >>> np.round(np.npv(rate, cashflows) + initial_cashflow, 5)
+ 3065.22267
"""
values = np.asarray(values)
diff --git a/numpy/lib/format.py b/numpy/lib/format.py
index 3bf818812..1ecd72815 100644
--- a/numpy/lib/format.py
+++ b/numpy/lib/format.py
@@ -173,6 +173,9 @@ from numpy.compat import (
)
+__all__ = []
+
+
MAGIC_PREFIX = b'\x93NUMPY'
MAGIC_LEN = len(MAGIC_PREFIX) + 2
ARRAY_ALIGN = 64 # plausible values are powers of 2 between 16 and 4096
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 9d380e67d..ebf918012 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -316,14 +316,17 @@ def average(a, axis=None, weights=None, returned=False):
The weights array can either be 1-D (in which case its length must be
the size of `a` along the given axis) or of the same shape as `a`.
If `weights=None`, then all data in `a` are assumed to have a
- weight equal to one.
+ weight equal to one. The 1-D calculation is::
+
+ avg = sum(a * weights) / sum(weights)
+
+ The only constraint on `weights` is that `sum(weights)` must not be 0.
returned : bool, optional
Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`)
is returned, otherwise only the average is returned.
If `weights=None`, `sum_of_weights` is equivalent to the number of
elements over which the average is taken.
-
Returns
-------
retval, [sum_of_weights] : array_type or double
@@ -679,11 +682,7 @@ def select(condlist, choicelist, default=0):
# Now that the dtype is known, handle the deprecated select([], []) case
if len(condlist) == 0:
- # 2014-02-24, 1.9
- warnings.warn("select with an empty condition list is not possible"
- "and will be deprecated",
- DeprecationWarning, stacklevel=3)
- return np.asarray(default)[()]
+ raise ValueError("select with an empty condition list is not possible")
choicelist = [np.asarray(choice) for choice in choicelist]
choicelist.append(np.asarray(default))
@@ -699,25 +698,11 @@ def select(condlist, choicelist, default=0):
choicelist = np.broadcast_arrays(*choicelist)
# If cond array is not an ndarray in boolean format or scalar bool, abort.
- deprecated_ints = False
for i in range(len(condlist)):
cond = condlist[i]
if cond.dtype.type is not np.bool_:
- if np.issubdtype(cond.dtype, np.integer):
- # A previous implementation accepted int ndarrays accidentally.
- # Supported here deliberately, but deprecated.
- condlist[i] = condlist[i].astype(bool)
- deprecated_ints = True
- else:
- raise ValueError(
- 'invalid entry {} in condlist: should be boolean ndarray'.format(i))
-
- if deprecated_ints:
- # 2014-02-24, 1.9
- msg = "select condlists containing integer ndarrays is deprecated " \
- "and will be removed in the future. Use `.astype(bool)` to " \
- "convert to bools."
- warnings.warn(msg, DeprecationWarning, stacklevel=3)
+ raise TypeError(
+ 'invalid entry {} in condlist: should be boolean ndarray'.format(i))
if choicelist[0].ndim == 0:
# This may be common, so avoid the call.
@@ -1164,11 +1149,13 @@ def diff(a, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue):
The axis along which the difference is taken, default is the
last axis.
prepend, append : array_like, optional
- Values to prepend or append to "a" along axis prior to
+ Values to prepend or append to `a` along axis prior to
performing the difference. Scalar values are expanded to
arrays with length 1 in the direction of axis and the shape
of the input array in along all other axes. Otherwise the
- dimension and shape must match "a" except along axis.
+ dimension and shape must match `a` except along axis.
+
+ .. versionadded:: 1.16.0
Returns
-------
diff --git a/numpy/lib/info.py b/numpy/lib/info.py
deleted file mode 100644
index 8815a52f0..000000000
--- a/numpy/lib/info.py
+++ /dev/null
@@ -1,160 +0,0 @@
-"""
-Basic functions used by several sub-packages and
-useful to have in the main name-space.
-
-Type Handling
--------------
-================ ===================
-iscomplexobj Test for complex object, scalar result
-isrealobj Test for real object, scalar result
-iscomplex Test for complex elements, array result
-isreal Test for real elements, array result
-imag Imaginary part
-real Real part
-real_if_close Turns complex number with tiny imaginary part to real
-isneginf Tests for negative infinity, array result
-isposinf Tests for positive infinity, array result
-isnan Tests for nans, array result
-isinf Tests for infinity, array result
-isfinite Tests for finite numbers, array result
-isscalar True if argument is a scalar
-nan_to_num Replaces NaN's with 0 and infinities with large numbers
-cast Dictionary of functions to force cast to each type
-common_type Determine the minimum common type code for a group
- of arrays
-mintypecode Return minimal allowed common typecode.
-================ ===================
-
-Index Tricks
-------------
-================ ===================
-mgrid Method which allows easy construction of N-d
- 'mesh-grids'
-``r_`` Append and construct arrays: turns slice objects into
- ranges and concatenates them, for 2d arrays appends rows.
-index_exp Konrad Hinsen's index_expression class instance which
- can be useful for building complicated slicing syntax.
-================ ===================
-
-Useful Functions
-----------------
-================ ===================
-select Extension of where to multiple conditions and choices
-extract Extract 1d array from flattened array according to mask
-insert Insert 1d array of values into Nd array according to mask
-linspace Evenly spaced samples in linear space
-logspace Evenly spaced samples in logarithmic space
-fix Round x to nearest integer towards zero
-mod Modulo mod(x,y) = x % y except keeps sign of y
-amax Array maximum along axis
-amin Array minimum along axis
-ptp Array max-min along axis
-cumsum Cumulative sum along axis
-prod Product of elements along axis
-cumprod Cumluative product along axis
-diff Discrete differences along axis
-angle Returns angle of complex argument
-unwrap Unwrap phase along given axis (1-d algorithm)
-sort_complex Sort a complex-array (based on real, then imaginary)
-trim_zeros Trim the leading and trailing zeros from 1D array.
-vectorize A class that wraps a Python function taking scalar
- arguments into a generalized function which can handle
- arrays of arguments using the broadcast rules of
- numerix Python.
-================ ===================
-
-Shape Manipulation
-------------------
-================ ===================
-squeeze Return a with length-one dimensions removed.
-atleast_1d Force arrays to be >= 1D
-atleast_2d Force arrays to be >= 2D
-atleast_3d Force arrays to be >= 3D
-vstack Stack arrays vertically (row on row)
-hstack Stack arrays horizontally (column on column)
-column_stack Stack 1D arrays as columns into 2D array
-dstack Stack arrays depthwise (along third dimension)
-stack Stack arrays along a new axis
-split Divide array into a list of sub-arrays
-hsplit Split into columns
-vsplit Split into rows
-dsplit Split along third dimension
-================ ===================
-
-Matrix (2D Array) Manipulations
--------------------------------
-================ ===================
-fliplr 2D array with columns flipped
-flipud 2D array with rows flipped
-rot90 Rotate a 2D array a multiple of 90 degrees
-eye Return a 2D array with ones down a given diagonal
-diag Construct a 2D array from a vector, or return a given
- diagonal from a 2D array.
-mat Construct a Matrix
-bmat Build a Matrix from blocks
-================ ===================
-
-Polynomials
------------
-================ ===================
-poly1d A one-dimensional polynomial class
-poly Return polynomial coefficients from roots
-roots Find roots of polynomial given coefficients
-polyint Integrate polynomial
-polyder Differentiate polynomial
-polyadd Add polynomials
-polysub Subtract polynomials
-polymul Multiply polynomials
-polydiv Divide polynomials
-polyval Evaluate polynomial at given argument
-================ ===================
-
-Iterators
----------
-================ ===================
-Arrayterator A buffered iterator for big arrays.
-================ ===================
-
-Import Tricks
--------------
-================ ===================
-ppimport Postpone module import until trying to use it
-ppimport_attr Postpone module import until trying to use its attribute
-ppresolve Import postponed module and return it.
-================ ===================
-
-Machine Arithmetics
--------------------
-================ ===================
-machar_single Single precision floating point arithmetic parameters
-machar_double Double precision floating point arithmetic parameters
-================ ===================
-
-Threading Tricks
-----------------
-================ ===================
-ParallelExec Execute commands in parallel thread.
-================ ===================
-
-Array Set Operations
------------------------
-Set operations for numeric arrays based on sort() function.
-
-================ ===================
-unique Unique elements of an array.
-isin Test whether each element of an ND array is present
- anywhere within a second array.
-ediff1d Array difference (auxiliary function).
-intersect1d Intersection of 1D arrays with unique elements.
-setxor1d Set exclusive-or of 1D arrays with unique elements.
-in1d Test whether elements in a 1D array are also present in
- another array.
-union1d Union of 1D arrays with unique elements.
-setdiff1d Set difference of 1D arrays with unique elements.
-================ ===================
-
-"""
-from __future__ import division, absolute_import, print_function
-
-depends = ['core', 'testing']
-global_symbols = ['*']
diff --git a/numpy/lib/mixins.py b/numpy/lib/mixins.py
index 52ad45b68..f974a7724 100644
--- a/numpy/lib/mixins.py
+++ b/numpy/lib/mixins.py
@@ -5,8 +5,8 @@ import sys
from numpy.core import umath as um
-# Nothing should be exposed in the top-level NumPy module.
-__all__ = []
+
+__all__ = ['NDArrayOperatorsMixin']
def _disables_array_ufunc(obj):
diff --git a/numpy/lib/nanfunctions.py b/numpy/lib/nanfunctions.py
index 9a03d0b39..6cffab6ac 100644
--- a/numpy/lib/nanfunctions.py
+++ b/numpy/lib/nanfunctions.py
@@ -1443,7 +1443,7 @@ def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
the variance of the flattened array.
dtype : data-type, optional
Type to use in computing the variance. For arrays of integer type
- the default is `float32`; for arrays of float types it is the same as
+ the default is `float64`; for arrays of float types it is the same as
the array type.
out : ndarray, optional
Alternate output array in which to place the result. It must have
diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py
index b9dc444f8..e57a6dd47 100644
--- a/numpy/lib/npyio.py
+++ b/numpy/lib/npyio.py
@@ -506,7 +506,9 @@ def save(file, arr, allow_pickle=True, fix_imports=True):
Notes
-----
For a description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.
-
+
+ Any data saved to the file is appended to the end of the file.
+
Examples
--------
>>> from tempfile import TemporaryFile
@@ -519,6 +521,15 @@ def save(file, arr, allow_pickle=True, fix_imports=True):
>>> np.load(outfile)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+
+ >>> with open('test.npy', 'wb') as f:
+ ... np.save(f, np.array([1, 2]))
+ ... np.save(f, np.array([1, 3]))
+ >>> with open('test.npy', 'rb') as f:
+ ... a = np.load(f)
+ ... b = np.load(f)
+ >>> print(a, b)
+ # [1 2] [1 3]
"""
own_fid = False
if hasattr(file, 'write'):
@@ -1776,12 +1787,13 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
replace_space=replace_space)
# Skip the first `skip_header` rows
- for i in range(skip_header):
- next(fhd)
-
- # Keep on until we find the first valid values
- first_values = None
try:
+ for i in range(skip_header):
+ next(fhd)
+
+ # Keep on until we find the first valid values
+ first_values = None
+
while not first_values:
first_line = _decode_line(next(fhd), encoding)
if (names is True) and (comments is not None):
@@ -2168,7 +2180,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
outputmask = np.array(masks, dtype=mdtype)
else:
# Overwrite the initial dtype names if needed
- if names and dtype.names:
+ if names and dtype.names is not None:
dtype.names = names
# Case 1. We have a structured type
if len(dtype_flat) > 1:
@@ -2218,7 +2230,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
#
output = np.array(data, dtype)
if usemask:
- if dtype.names:
+ if dtype.names is not None:
mdtype = [(_, bool) for _ in dtype.names]
else:
mdtype = bool
diff --git a/numpy/lib/recfunctions.py b/numpy/lib/recfunctions.py
index 6e257bb3f..927161ddb 100644
--- a/numpy/lib/recfunctions.py
+++ b/numpy/lib/recfunctions.py
@@ -72,7 +72,7 @@ def recursive_fill_fields(input, output):
current = input[field]
except ValueError:
continue
- if current.dtype.names:
+ if current.dtype.names is not None:
recursive_fill_fields(current, output[field])
else:
output[field][:len(current)] = current
@@ -139,11 +139,11 @@ def get_names(adtype):
names = adtype.names
for name in names:
current = adtype[name]
- if current.names:
+ if current.names is not None:
listnames.append((name, tuple(get_names(current))))
else:
listnames.append(name)
- return tuple(listnames) or None
+ return tuple(listnames)
def get_names_flat(adtype):
@@ -176,9 +176,9 @@ def get_names_flat(adtype):
for name in names:
listnames.append(name)
current = adtype[name]
- if current.names:
+ if current.names is not None:
listnames.extend(get_names_flat(current))
- return tuple(listnames) or None
+ return tuple(listnames)
def flatten_descr(ndtype):
@@ -200,7 +200,7 @@ def flatten_descr(ndtype):
descr = []
for field in names:
(typ, _) = ndtype.fields[field]
- if typ.names:
+ if typ.names is not None:
descr.extend(flatten_descr(typ))
else:
descr.append((field, typ))
@@ -215,8 +215,8 @@ def _zip_dtype(seqarrays, flatten=False):
else:
for a in seqarrays:
current = a.dtype
- if current.names and len(current.names) <= 1:
- # special case - dtypes of 0 or 1 field are flattened
+ if current.names is not None and len(current.names) == 1:
+ # special case - dtypes of 1 field are flattened
newdtype.extend(_get_fieldspec(current))
else:
newdtype.append(('', current))
@@ -268,7 +268,7 @@ def get_fieldstructure(adtype, lastname=None, parents=None,):
names = adtype.names
for name in names:
current = adtype[name]
- if current.names:
+ if current.names is not None:
if lastname:
parents[name] = [lastname, ]
else:
@@ -281,7 +281,7 @@ def get_fieldstructure(adtype, lastname=None, parents=None,):
elif lastname:
lastparent = [lastname, ]
parents[name] = lastparent or []
- return parents or None
+ return parents
def _izip_fields_flat(iterable):
@@ -435,7 +435,7 @@ def merge_arrays(seqarrays, fill_value=-1, flatten=False,
if isinstance(seqarrays, (ndarray, np.void)):
seqdtype = seqarrays.dtype
# Make sure we have named fields
- if not seqdtype.names:
+ if seqdtype.names is None:
seqdtype = np.dtype([('', seqdtype)])
if not flatten or _zip_dtype((seqarrays,), flatten=True) == seqdtype:
# Minimal processing needed: just make sure everythng's a-ok
@@ -527,6 +527,10 @@ def drop_fields(base, drop_names, usemask=True, asrecarray=False):
Nested fields are supported.
+ ..versionchanged: 1.18.0
+ `drop_fields` returns an array with 0 fields if all fields are dropped,
+ rather than returning ``None`` as it did previously.
+
Parameters
----------
base : array
@@ -566,7 +570,7 @@ def drop_fields(base, drop_names, usemask=True, asrecarray=False):
current = ndtype[name]
if name in drop_names:
continue
- if current.names:
+ if current.names is not None:
descr = _drop_descr(current, drop_names)
if descr:
newdtype.append((name, descr))
@@ -575,8 +579,6 @@ def drop_fields(base, drop_names, usemask=True, asrecarray=False):
return newdtype
newdtype = _drop_descr(base.dtype, drop_names)
- if not newdtype:
- return None
output = np.empty(base.shape, dtype=newdtype)
output = recursive_fill_fields(base, output)
@@ -653,7 +655,7 @@ def rename_fields(base, namemapper):
for name in ndtype.names:
newname = namemapper.get(name, name)
current = ndtype[name]
- if current.names:
+ if current.names is not None:
newdtype.append(
(newname, _recursive_rename_fields(current, namemapper))
)
@@ -874,16 +876,35 @@ def _get_fields_and_offsets(dt, offset=0):
scalar fields in the dtype "dt", including nested fields, in left
to right order.
"""
+
+ # counts up elements in subarrays, including nested subarrays, and returns
+ # base dtype and count
+ def count_elem(dt):
+ count = 1
+ while dt.shape != ():
+ for size in dt.shape:
+ count *= size
+ dt = dt.base
+ return dt, count
+
fields = []
for name in dt.names:
field = dt.fields[name]
- if field[0].names is None:
- count = 1
- for size in field[0].shape:
- count *= size
- fields.append((field[0], count, field[1] + offset))
+ f_dt, f_offset = field[0], field[1]
+ f_dt, n = count_elem(f_dt)
+
+ if f_dt.names is None:
+ fields.append((np.dtype((f_dt, (n,))), n, f_offset + offset))
else:
- fields.extend(_get_fields_and_offsets(field[0], field[1] + offset))
+ subfields = _get_fields_and_offsets(f_dt, f_offset + offset)
+ size = f_dt.itemsize
+
+ for i in range(n):
+ if i == 0:
+ # optimization: avoid list comprehension if no subarray
+ fields.extend(subfields)
+ else:
+ fields.extend([(d, c, o + i*size) for d, c, o in subfields])
return fields
@@ -948,6 +969,12 @@ def structured_to_unstructured(arr, dtype=None, copy=False, casting='unsafe'):
fields = _get_fields_and_offsets(arr.dtype)
n_fields = len(fields)
+ if n_fields == 0 and dtype is None:
+ raise ValueError("arr has no fields. Unable to guess dtype")
+ elif n_fields == 0:
+ # too many bugs elsewhere for this to work now
+ raise NotImplementedError("arr with no fields is not supported")
+
dts, counts, offsets = zip(*fields)
names = ['f{}'.format(n) for n in range(n_fields)]
@@ -1039,6 +1066,9 @@ def unstructured_to_structured(arr, dtype=None, names=None, align=False,
if arr.shape == ():
raise ValueError('arr must have at least one dimension')
n_elem = arr.shape[-1]
+ if n_elem == 0:
+ # too many bugs elsewhere for this to work now
+ raise NotImplementedError("last axis with size 0 is not supported")
if dtype is None:
if names is None:
@@ -1051,7 +1081,11 @@ def unstructured_to_structured(arr, dtype=None, names=None, align=False,
raise ValueError("don't supply both dtype and names")
# sanity check of the input dtype
fields = _get_fields_and_offsets(dtype)
- dts, counts, offsets = zip(*fields)
+ if len(fields) == 0:
+ dts, counts, offsets = [], [], []
+ else:
+ dts, counts, offsets = zip(*fields)
+
if n_elem != sum(counts):
raise ValueError('The length of the last dimension of arr must '
'be equal to the number of fields in dtype')
diff --git a/numpy/lib/shape_base.py b/numpy/lib/shape_base.py
index a5d0040aa..92d52109e 100644
--- a/numpy/lib/shape_base.py
+++ b/numpy/lib/shape_base.py
@@ -782,7 +782,7 @@ def _split_dispatcher(ary, indices_or_sections, axis=None):
@array_function_dispatch(_split_dispatcher)
def split(ary, indices_or_sections, axis=0):
"""
- Split an array into multiple sub-arrays.
+ Split an array into multiple sub-arrays as views into `ary`.
Parameters
----------
@@ -809,7 +809,7 @@ def split(ary, indices_or_sections, axis=0):
Returns
-------
sub-arrays : list of ndarrays
- A list of sub-arrays.
+ A list of sub-arrays as views into `ary`.
Raises
------
@@ -854,8 +854,7 @@ def split(ary, indices_or_sections, axis=0):
if N % sections:
raise ValueError(
'array split does not result in an equal division')
- res = array_split(ary, indices_or_sections, axis)
- return res
+ return array_split(ary, indices_or_sections, axis)
def _hvdsplit_dispatcher(ary, indices_or_sections):
diff --git a/numpy/lib/tests/test_arraypad.py b/numpy/lib/tests/test_arraypad.py
index b7630cdcd..65593dd29 100644
--- a/numpy/lib/tests/test_arraypad.py
+++ b/numpy/lib/tests/test_arraypad.py
@@ -2,7 +2,6 @@
"""
from __future__ import division, absolute_import, print_function
-from itertools import chain
import pytest
@@ -11,6 +10,12 @@ from numpy.testing import assert_array_equal, assert_allclose, assert_equal
from numpy.lib.arraypad import _as_pairs
+_numeric_dtypes = (
+ np.sctypes["uint"]
+ + np.sctypes["int"]
+ + np.sctypes["float"]
+ + np.sctypes["complex"]
+)
_all_modes = {
'constant': {'constant_values': 0},
'edge': {},
@@ -469,6 +474,29 @@ class TestStatistic(object):
)
assert_array_equal(a, b)
+ @pytest.mark.filterwarnings("ignore:Mean of empty slice:RuntimeWarning")
+ @pytest.mark.filterwarnings(
+ "ignore:invalid value encountered in (true_divide|double_scalars):"
+ "RuntimeWarning"
+ )
+ @pytest.mark.parametrize("mode", ["mean", "median"])
+ def test_zero_stat_length_valid(self, mode):
+ arr = np.pad([1., 2.], (1, 2), mode, stat_length=0)
+ expected = np.array([np.nan, 1., 2., np.nan, np.nan])
+ assert_equal(arr, expected)
+
+ @pytest.mark.parametrize("mode", ["minimum", "maximum"])
+ def test_zero_stat_length_invalid(self, mode):
+ match = "stat_length of 0 yields no value for padding"
+ with pytest.raises(ValueError, match=match):
+ np.pad([1., 2.], 0, mode, stat_length=0)
+ with pytest.raises(ValueError, match=match):
+ np.pad([1., 2.], 0, mode, stat_length=(1, 0))
+ with pytest.raises(ValueError, match=match):
+ np.pad([1., 2.], 1, mode, stat_length=0)
+ with pytest.raises(ValueError, match=match):
+ np.pad([1., 2.], 1, mode, stat_length=(1, 0))
+
class TestConstant(object):
def test_check_constant(self):
@@ -715,6 +743,24 @@ class TestLinearRamp(object):
assert_equal(a[0, :], 0.)
assert_equal(a[-1, :], 0.)
+ @pytest.mark.parametrize("dtype", _numeric_dtypes)
+ def test_negative_difference(self, dtype):
+ """
+ Check correct behavior of unsigned dtypes if there is a negative
+ difference between the edge to pad and `end_values`. Check both cases
+ to be independent of implementation. Test behavior for all other dtypes
+ in case dtype casting interferes with complex dtypes. See gh-14191.
+ """
+ x = np.array([3], dtype=dtype)
+ result = np.pad(x, 3, mode="linear_ramp", end_values=0)
+ expected = np.array([0, 1, 2, 3, 2, 1, 0], dtype=dtype)
+ assert_equal(result, expected)
+
+ x = np.array([0], dtype=dtype)
+ result = np.pad(x, 3, mode="linear_ramp", end_values=3)
+ expected = np.array([3, 2, 1, 0, 1, 2, 3], dtype=dtype)
+ assert_equal(result, expected)
+
class TestReflect(object):
def test_check_simple(self):
@@ -1307,13 +1353,7 @@ def test_memory_layout_persistence(mode):
assert np.pad(x, 5, mode).flags["F_CONTIGUOUS"]
-@pytest.mark.parametrize("dtype", chain(
- # Skip "other" dtypes as they are not supported by all modes
- np.sctypes["int"],
- np.sctypes["uint"],
- np.sctypes["float"],
- np.sctypes["complex"]
-))
+@pytest.mark.parametrize("dtype", _numeric_dtypes)
@pytest.mark.parametrize("mode", _all_modes.keys())
def test_dtype_persistence(dtype, mode):
arr = np.zeros((3, 2, 1), dtype=dtype)
diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py
index dd8a38248..fd21a7f76 100644
--- a/numpy/lib/tests/test_arraysetops.py
+++ b/numpy/lib/tests/test_arraysetops.py
@@ -600,8 +600,11 @@ class TestUnique(object):
assert_array_equal(unique(data, axis=1), result.astype(dtype), msg)
msg = 'Unique with 3d array and axis=2 failed'
- data3d = np.dstack([data] * 3)
- result = data3d[..., :1]
+ data3d = np.array([[[1, 1],
+ [1, 0]],
+ [[0, 1],
+ [0, 0]]]).astype(dtype)
+ result = np.take(data3d, [1, 0], axis=2)
assert_array_equal(unique(data3d, axis=2), result, msg)
uniq, idx, inv, cnt = unique(data, axis=0, return_index=True,
diff --git a/numpy/lib/tests/test_financial.py b/numpy/lib/tests/test_financial.py
index 524915041..21088765f 100644
--- a/numpy/lib/tests/test_financial.py
+++ b/numpy/lib/tests/test_financial.py
@@ -9,6 +9,12 @@ from numpy.testing import (
class TestFinancial(object):
+ def test_npv_irr_congruence(self):
+ # IRR is defined as the rate required for the present value of a
+ # a series of cashflows to be zero i.e. NPV(IRR(x), x) = 0
+ cashflows = np.array([-40000, 5000, 8000, 12000, 30000])
+ assert_allclose(np.npv(np.irr(cashflows), cashflows), 0, atol=1e-10, rtol=0)
+
def test_rate(self):
assert_almost_equal(
np.rate(10, 0, -3500, 10000),
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index eae52c002..1eae8ccfb 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -423,27 +423,17 @@ class TestSelect(object):
assert_equal(select([m], [d]), [0, 0, 0, np.nan, 0, 0])
def test_deprecated_empty(self):
- with warnings.catch_warnings(record=True):
- warnings.simplefilter("always")
- assert_equal(select([], [], 3j), 3j)
-
- with warnings.catch_warnings():
- warnings.simplefilter("always")
- assert_warns(DeprecationWarning, select, [], [])
- warnings.simplefilter("error")
- assert_raises(DeprecationWarning, select, [], [])
+ assert_raises(ValueError, select, [], [], 3j)
+ assert_raises(ValueError, select, [], [])
def test_non_bool_deprecation(self):
choices = self.choices
conditions = self.conditions[:]
- with warnings.catch_warnings():
- warnings.filterwarnings("always")
- conditions[0] = conditions[0].astype(np.int_)
- assert_warns(DeprecationWarning, select, conditions, choices)
- conditions[0] = conditions[0].astype(np.uint8)
- assert_warns(DeprecationWarning, select, conditions, choices)
- warnings.filterwarnings("error")
- assert_raises(DeprecationWarning, select, conditions, choices)
+ conditions[0] = conditions[0].astype(np.int_)
+ assert_raises(TypeError, select, conditions, choices)
+ conditions[0] = conditions[0].astype(np.uint8)
+ assert_raises(TypeError, select, conditions, choices)
+ assert_raises(TypeError, select, conditions, choices)
def test_many_arguments(self):
# This used to be limited by NPY_MAXARGS == 32
diff --git a/numpy/lib/tests/test_index_tricks.py b/numpy/lib/tests/test_index_tricks.py
index a5cdda074..dbe445c2c 100644
--- a/numpy/lib/tests/test_index_tricks.py
+++ b/numpy/lib/tests/test_index_tricks.py
@@ -175,6 +175,24 @@ class TestRavelUnravelIndex(object):
assert_raises_regex(
ValueError, "out of bounds", np.unravel_index, [1], ())
+ @pytest.mark.parametrize("mode", ["clip", "wrap", "raise"])
+ def test_empty_array_ravel(self, mode):
+ res = np.ravel_multi_index(
+ np.zeros((3, 0), dtype=np.intp), (2, 1, 0), mode=mode)
+ assert(res.shape == (0,))
+
+ with assert_raises(ValueError):
+ np.ravel_multi_index(
+ np.zeros((3, 1), dtype=np.intp), (2, 1, 0), mode=mode)
+
+ def test_empty_array_unravel(self):
+ res = np.unravel_index(np.zeros(0, dtype=np.intp), (2, 1, 0))
+ # res is a tuple of three empty arrays
+ assert(len(res) == 3)
+ assert(all(a.shape == (0,) for a in res))
+
+ with assert_raises(ValueError):
+ np.unravel_index([1], (2, 1, 0))
class TestGrid(object):
def test_basic(self):
diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py
index 78f9f85f3..6ee17c830 100644
--- a/numpy/lib/tests/test_io.py
+++ b/numpy/lib/tests/test_io.py
@@ -1565,6 +1565,13 @@ M 33 21.99
test = np.genfromtxt(TextIO(data), delimiter=";",
dtype=ndtype, converters=converters)
+ # nested but empty fields also aren't supported
+ ndtype = [('idx', int), ('code', object), ('nest', [])]
+ with assert_raises_regex(NotImplementedError,
+ 'Nested fields.* not supported.*'):
+ test = np.genfromtxt(TextIO(data), delimiter=";",
+ dtype=ndtype, converters=converters)
+
def test_userconverters_with_explicit_dtype(self):
# Test user_converters w/ explicit (standard) dtype
data = TextIO('skip,skip,2001-01-01,1.0,skip')
@@ -1681,6 +1688,10 @@ M 33 21.99
test = np.genfromtxt(data)
assert_equal(test, np.array([]))
+ # when skip_header > 0
+ test = np.genfromtxt(data, skip_header=1)
+ assert_equal(test, np.array([]))
+
def test_fancy_dtype_alt(self):
# Check that a nested dtype isn't MIA
data = TextIO('1,2,3.0\n4,5,6.0\n')
diff --git a/numpy/lib/tests/test_recfunctions.py b/numpy/lib/tests/test_recfunctions.py
index 0126ccaf8..fa5f4dec2 100644
--- a/numpy/lib/tests/test_recfunctions.py
+++ b/numpy/lib/tests/test_recfunctions.py
@@ -91,8 +91,10 @@ class TestRecFunctions(object):
control = np.array([(1,), (4,)], dtype=[('a', int)])
assert_equal(test, control)
+ # dropping all fields results in an array with no fields
test = drop_fields(a, ['a', 'b'])
- assert_(test is None)
+ control = np.array([(), ()], dtype=[])
+ assert_equal(test, control)
def test_rename_fields(self):
# Test rename fields
@@ -115,6 +117,14 @@ class TestRecFunctions(object):
test = get_names(ndtype)
assert_equal(test, ('a', ('b', ('ba', 'bb'))))
+ ndtype = np.dtype([('a', int), ('b', [])])
+ test = get_names(ndtype)
+ assert_equal(test, ('a', ('b', ())))
+
+ ndtype = np.dtype([])
+ test = get_names(ndtype)
+ assert_equal(test, ())
+
def test_get_names_flat(self):
# Test get_names_flat
ndtype = np.dtype([('A', '|S3'), ('B', float)])
@@ -125,6 +135,14 @@ class TestRecFunctions(object):
test = get_names_flat(ndtype)
assert_equal(test, ('a', 'b', 'ba', 'bb'))
+ ndtype = np.dtype([('a', int), ('b', [])])
+ test = get_names_flat(ndtype)
+ assert_equal(test, ('a', 'b'))
+
+ ndtype = np.dtype([])
+ test = get_names_flat(ndtype)
+ assert_equal(test, ())
+
def test_get_fieldstructure(self):
# Test get_fieldstructure
@@ -147,6 +165,11 @@ class TestRecFunctions(object):
'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
assert_equal(test, control)
+ # 0 fields
+ ndtype = np.dtype([])
+ test = get_fieldstructure(ndtype)
+ assert_equal(test, {})
+
def test_find_duplicates(self):
# Test find_duplicates
a = ma.array([(2, (2., 'B')), (1, (2., 'B')), (2, (2., 'B')),
@@ -248,7 +271,8 @@ class TestRecFunctions(object):
# including uniform fields with subarrays unpacked
d = np.array([(1, [2, 3], [[ 4, 5], [ 6, 7]]),
(8, [9, 10], [[11, 12], [13, 14]])],
- dtype=[('x0', 'i4'), ('x1', ('i4', 2)), ('x2', ('i4', (2, 2)))])
+ dtype=[('x0', 'i4'), ('x1', ('i4', 2)),
+ ('x2', ('i4', (2, 2)))])
dd = structured_to_unstructured(d)
ddd = unstructured_to_structured(dd, d.dtype)
assert_(dd.base is d)
@@ -262,6 +286,40 @@ class TestRecFunctions(object):
assert_equal(res, np.zeros((10, 6), dtype=int))
+ # test nested combinations of subarrays and structured arrays, gh-13333
+ def subarray(dt, shape):
+ return np.dtype((dt, shape))
+
+ def structured(*dts):
+ return np.dtype([('x{}'.format(i), dt) for i, dt in enumerate(dts)])
+
+ def inspect(dt, dtype=None):
+ arr = np.zeros((), dt)
+ ret = structured_to_unstructured(arr, dtype=dtype)
+ backarr = unstructured_to_structured(ret, dt)
+ return ret.shape, ret.dtype, backarr.dtype
+
+ dt = structured(subarray(structured(np.int32, np.int32), 3))
+ assert_equal(inspect(dt), ((6,), np.int32, dt))
+
+ dt = structured(subarray(subarray(np.int32, 2), 2))
+ assert_equal(inspect(dt), ((4,), np.int32, dt))
+
+ dt = structured(np.int32)
+ assert_equal(inspect(dt), ((1,), np.int32, dt))
+
+ dt = structured(np.int32, subarray(subarray(np.int32, 2), 2))
+ assert_equal(inspect(dt), ((5,), np.int32, dt))
+
+ dt = structured()
+ assert_raises(ValueError, structured_to_unstructured, np.zeros(3, dt))
+
+ # these currently don't work, but we may make it work in the future
+ assert_raises(NotImplementedError, structured_to_unstructured,
+ np.zeros(3, dt), dtype=np.int32)
+ assert_raises(NotImplementedError, unstructured_to_structured,
+ np.zeros((3,0), dtype=np.int32))
+
def test_field_assignment_by_name(self):
a = np.ones(2, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')])
newdt = [('b', 'f4'), ('c', 'u1')]
@@ -322,8 +380,8 @@ class TestMergeArrays(object):
z = np.array(
[('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)])
w = np.array(
- [(1, (2, 3.0)), (4, (5, 6.0))],
- dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
+ [(1, (2, 3.0, ())), (4, (5, 6.0, ()))],
+ dtype=[('a', int), ('b', [('ba', float), ('bb', int), ('bc', [])])])
self.data = (w, x, y, z)
def test_solo(self):
@@ -394,8 +452,8 @@ class TestMergeArrays(object):
test = merge_arrays((x, w), flatten=False)
controldtype = [('f0', int),
('f1', [('a', int),
- ('b', [('ba', float), ('bb', int)])])]
- control = np.array([(1., (1, (2, 3.0))), (2, (4, (5, 6.0)))],
+ ('b', [('ba', float), ('bb', int), ('bc', [])])])]
+ control = np.array([(1., (1, (2, 3.0, ()))), (2, (4, (5, 6.0, ())))],
dtype=controldtype)
assert_equal(test, control)
diff --git a/numpy/lib/twodim_base.py b/numpy/lib/twodim_base.py
index f3dc6c8e1..f45392188 100644
--- a/numpy/lib/twodim_base.py
+++ b/numpy/lib/twodim_base.py
@@ -368,7 +368,7 @@ def tri(N, M=None, k=0, dtype=float):
-------
tri : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
- in other words ``T[i,j] == 1`` for ``i <= j + k``, 0 otherwise.
+ in other words ``T[i,j] == 1`` for ``j <= i + k``, 0 otherwise.
Examples
--------
diff --git a/numpy/lib/type_check.py b/numpy/lib/type_check.py
index ac4b03a6c..586824743 100644
--- a/numpy/lib/type_check.py
+++ b/numpy/lib/type_check.py
@@ -395,19 +395,27 @@ def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None):
in-place (False). The in-place operation only occurs if
casting to an array does not require a copy.
Default is True.
+
+ .. versionadded:: 1.13
nan : int, float, optional
Value to be used to fill NaN values. If no value is passed
then NaN values will be replaced with 0.0.
+
+ .. versionadded:: 1.17
posinf : int, float, optional
Value to be used to fill positive infinity values. If no value is
passed then positive infinity values will be replaced with a very
large number.
+
+ .. versionadded:: 1.17
neginf : int, float, optional
Value to be used to fill negative infinity values. If no value is
passed then negative infinity values will be replaced with a very
small (or negative) number.
+
+ .. versionadded:: 1.17
- .. versionadded:: 1.13
+
Returns
-------
diff --git a/numpy/lib/utils.py b/numpy/lib/utils.py
index c7dbcc5f9..3c71d2a7c 100644
--- a/numpy/lib/utils.py
+++ b/numpy/lib/utils.py
@@ -788,13 +788,8 @@ def lookfor(what, module=None, import_modules=True, regenerate=False,
if kind in ('module', 'object'):
# don't show modules or objects
continue
- ok = True
doc = docstring.lower()
- for w in whats:
- if w not in doc:
- ok = False
- break
- if ok:
+ if all(w in doc for w in whats):
found.append(name)
# Relevance sort
@@ -1003,93 +998,6 @@ def _getmembers(item):
if hasattr(item, x)]
return members
-#-----------------------------------------------------------------------------
-
-# The following SafeEval class and company are adapted from Michael Spencer's
-# ASPN Python Cookbook recipe: https://code.activestate.com/recipes/364469/
-#
-# Accordingly it is mostly Copyright 2006 by Michael Spencer.
-# The recipe, like most of the other ASPN Python Cookbook recipes was made
-# available under the Python license.
-# https://en.wikipedia.org/wiki/Python_License
-
-# It has been modified to:
-# * handle unary -/+
-# * support True/False/None
-# * raise SyntaxError instead of a custom exception.
-
-class SafeEval(object):
- """
- Object to evaluate constant string expressions.
-
- This includes strings with lists, dicts and tuples using the abstract
- syntax tree created by ``compiler.parse``.
-
- .. deprecated:: 1.10.0
-
- See Also
- --------
- safe_eval
-
- """
- def __init__(self):
- # 2014-10-15, 1.10
- warnings.warn("SafeEval is deprecated in 1.10 and will be removed.",
- DeprecationWarning, stacklevel=2)
-
- def visit(self, node):
- cls = node.__class__
- meth = getattr(self, 'visit' + cls.__name__, self.default)
- return meth(node)
-
- def default(self, node):
- raise SyntaxError("Unsupported source construct: %s"
- % node.__class__)
-
- def visitExpression(self, node):
- return self.visit(node.body)
-
- def visitNum(self, node):
- return node.n
-
- def visitStr(self, node):
- return node.s
-
- def visitBytes(self, node):
- return node.s
-
- def visitDict(self, node,**kw):
- return dict([(self.visit(k), self.visit(v))
- for k, v in zip(node.keys, node.values)])
-
- def visitTuple(self, node):
- return tuple([self.visit(i) for i in node.elts])
-
- def visitList(self, node):
- return [self.visit(i) for i in node.elts]
-
- def visitUnaryOp(self, node):
- import ast
- if isinstance(node.op, ast.UAdd):
- return +self.visit(node.operand)
- elif isinstance(node.op, ast.USub):
- return -self.visit(node.operand)
- else:
- raise SyntaxError("Unknown unary op: %r" % node.op)
-
- def visitName(self, node):
- if node.id == 'False':
- return False
- elif node.id == 'True':
- return True
- elif node.id == 'None':
- return None
- else:
- raise SyntaxError("Unknown name: %s" % node.id)
-
- def visitNameConstant(self, node):
- return node.value
-
def safe_eval(source):
"""
diff --git a/numpy/linalg/__init__.py b/numpy/linalg/__init__.py
index 4b696c883..55560815d 100644
--- a/numpy/linalg/__init__.py
+++ b/numpy/linalg/__init__.py
@@ -1,53 +1,77 @@
"""
-Core Linear Algebra Tools
-=========================
-
-=============== ==========================================================
-Linear algebra basics
-==========================================================================
-norm Vector or matrix norm
-inv Inverse of a square matrix
-solve Solve a linear system of equations
-det Determinant of a square matrix
-slogdet Logarithm of the determinant of a square matrix
-lstsq Solve linear least-squares problem
-pinv Pseudo-inverse (Moore-Penrose) calculated using a singular
- value decomposition
-matrix_power Integer power of a square matrix
-matrix_rank Calculate matrix rank using an SVD-based method
-=============== ==========================================================
-
-=============== ==========================================================
-Eigenvalues and decompositions
-==========================================================================
-eig Eigenvalues and vectors of a square matrix
-eigh Eigenvalues and eigenvectors of a Hermitian matrix
-eigvals Eigenvalues of a square matrix
-eigvalsh Eigenvalues of a Hermitian matrix
-qr QR decomposition of a matrix
-svd Singular value decomposition of a matrix
-cholesky Cholesky decomposition of a matrix
-=============== ==========================================================
-
-=============== ==========================================================
-Tensor operations
-==========================================================================
-tensorsolve Solve a linear tensor equation
-tensorinv Calculate an inverse of a tensor
-=============== ==========================================================
-
-=============== ==========================================================
+``numpy.linalg``
+================
+
+The NumPy linear algebra functions rely on BLAS and LAPACK to provide efficient
+low level implementations of standard linear algebra algorithms. Those
+libraries may be provided by NumPy itself using C versions of a subset of their
+reference implementations but, when possible, highly optimized libraries that
+take advantage of specialized processor functionality are preferred. Examples
+of such libraries are OpenBLAS, MKL (TM), and ATLAS. Because those libraries
+are multithreaded and processor dependent, environmental variables and external
+packages such as threadpoolctl may be needed to control the number of threads
+or specify the processor architecture.
+
+- OpenBLAS: https://www.openblas.net/
+- threadpoolctl: https://github.com/joblib/threadpoolctl
+
+Please note that the most-used linear algebra functions in NumPy are present in
+the main ``numpy`` namespace rather than in ``numpy.linalg``. There are:
+``dot``, ``vdot``, ``inner``, ``outer``, ``matmul``, ``tensordot``, ``einsum``,
+``einsum_path`` and ``kron``.
+
+Functions present in numpy.linalg are listed below.
+
+
+Matrix and vector products
+--------------------------
+
+ multi_dot
+ matrix_power
+
+Decompositions
+--------------
+
+ cholesky
+ qr
+ svd
+
+Matrix eigenvalues
+------------------
+
+ eig
+ eigh
+ eigvals
+ eigvalsh
+
+Norms and other numbers
+-----------------------
+
+ norm
+ cond
+ det
+ matrix_rank
+ slogdet
+
+Solving equations and inverting matrices
+----------------------------------------
+
+ solve
+ tensorsolve
+ lstsq
+ inv
+ pinv
+ tensorinv
+
Exceptions
-==========================================================================
-LinAlgError Indicates a failed linear algebra operation
-=============== ==========================================================
+----------
+
+ LinAlgError
"""
from __future__ import division, absolute_import, print_function
# To get sub-modules
-from .info import __doc__
-
from .linalg import *
from numpy._pytesttester import PytestTester
diff --git a/numpy/linalg/info.py b/numpy/linalg/info.py
deleted file mode 100644
index 646ecda04..000000000
--- a/numpy/linalg/info.py
+++ /dev/null
@@ -1,37 +0,0 @@
-"""\
-Core Linear Algebra Tools
--------------------------
-Linear algebra basics:
-
-- norm Vector or matrix norm
-- inv Inverse of a square matrix
-- solve Solve a linear system of equations
-- det Determinant of a square matrix
-- lstsq Solve linear least-squares problem
-- pinv Pseudo-inverse (Moore-Penrose) calculated using a singular
- value decomposition
-- matrix_power Integer power of a square matrix
-
-Eigenvalues and decompositions:
-
-- eig Eigenvalues and vectors of a square matrix
-- eigh Eigenvalues and eigenvectors of a Hermitian matrix
-- eigvals Eigenvalues of a square matrix
-- eigvalsh Eigenvalues of a Hermitian matrix
-- qr QR decomposition of a matrix
-- svd Singular value decomposition of a matrix
-- cholesky Cholesky decomposition of a matrix
-
-Tensor operations:
-
-- tensorsolve Solve a linear tensor equation
-- tensorinv Calculate an inverse of a tensor
-
-Exceptions:
-
-- LinAlgError Indicates a failed linear algebra operation
-
-"""
-from __future__ import division, absolute_import, print_function
-
-depends = ['core']
diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py
index 325d35c19..816a200eb 100644
--- a/numpy/linalg/linalg.py
+++ b/numpy/linalg/linalg.py
@@ -778,15 +778,13 @@ def qr(a, mode='reduced'):
----------
a : array_like, shape (M, N)
Matrix to be factored.
- mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional
+ mode : {'reduced', 'complete', 'r', 'raw'}, optional
If K = min(M, N), then
* 'reduced' : returns q, r with dimensions (M, K), (K, N) (default)
* 'complete' : returns q, r with dimensions (M, M), (M, N)
* 'r' : returns r only with dimensions (K, N)
* 'raw' : returns h, tau with dimensions (N, M), (K,)
- * 'full' : alias of 'reduced', deprecated
- * 'economic' : returns h from 'raw', deprecated.
The options 'reduced', 'complete, and 'raw' are new in numpy 1.8,
see the notes for more information. The default is 'reduced', and to
@@ -848,12 +846,8 @@ def qr(a, mode='reduced'):
>>> np.allclose(a, np.dot(q, r)) # a does equal qr
True
>>> r2 = np.linalg.qr(a, mode='r')
- >>> r3 = np.linalg.qr(a, mode='economic')
>>> np.allclose(r, r2) # mode='r' returns the same r as mode='full'
True
- >>> # But only triu parts are guaranteed equal when mode='economic'
- >>> np.allclose(r, np.triu(r3[:6,:6], k=0))
- True
Example illustrating a common use of `qr`: solving of least squares
problems
@@ -1487,6 +1481,12 @@ def svd(a, full_matrices=True, compute_uv=True, hermitian=False):
compute_uv : bool, optional
Whether or not to compute `u` and `vh` in addition to `s`. True
by default.
+ hermitian : bool, optional
+ If True, `a` is assumed to be Hermitian (symmetric if real-valued),
+ enabling a more efficient method for finding singular values.
+ Defaults to False.
+
+ .. versionadded:: 1.17.0
Returns
-------
@@ -1504,12 +1504,6 @@ def svd(a, full_matrices=True, compute_uv=True, hermitian=False):
size as those of the input `a`. The size of the last two dimensions
depends on the value of `full_matrices`. Only returned when
`compute_uv` is True.
- hermitian : bool, optional
- If True, `a` is assumed to be Hermitian (symmetric if real-valued),
- enabling a more efficient method for finding singular values.
- Defaults to False.
-
- .. versionadded:: 1.17.0
Raises
------
diff --git a/numpy/linalg/umath_linalg.c.src b/numpy/linalg/umath_linalg.c.src
index 9fc68a7aa..ee103c327 100644
--- a/numpy/linalg/umath_linalg.c.src
+++ b/numpy/linalg/umath_linalg.c.src
@@ -2522,8 +2522,6 @@ init_@lapack_func@(GESDD_PARAMS_t *params,
params->VT = vt;
params->RWORK = NULL;
params->IWORK = iwork;
- params->M = m;
- params->N = n;
params->LDA = ld;
params->LDU = ld;
params->LDVT = vt_column_count;
diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index f221b319a..bb3788c9a 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -59,14 +59,14 @@ __all__ = [
'choose', 'clip', 'common_fill_value', 'compress', 'compressed',
'concatenate', 'conjugate', 'convolve', 'copy', 'correlate', 'cos', 'cosh',
'count', 'cumprod', 'cumsum', 'default_fill_value', 'diag', 'diagonal',
- 'diff', 'divide', 'dump', 'dumps', 'empty', 'empty_like', 'equal', 'exp',
+ 'diff', 'divide', 'empty', 'empty_like', 'equal', 'exp',
'expand_dims', 'fabs', 'filled', 'fix_invalid', 'flatten_mask',
'flatten_structured_array', 'floor', 'floor_divide', 'fmod',
'frombuffer', 'fromflex', 'fromfunction', 'getdata', 'getmask',
'getmaskarray', 'greater', 'greater_equal', 'harden_mask', 'hypot',
'identity', 'ids', 'indices', 'inner', 'innerproduct', 'isMA',
'isMaskedArray', 'is_mask', 'is_masked', 'isarray', 'left_shift',
- 'less', 'less_equal', 'load', 'loads', 'log', 'log10', 'log2',
+ 'less', 'less_equal', 'log', 'log10', 'log2',
'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'make_mask',
'make_mask_descr', 'make_mask_none', 'mask_or', 'masked',
'masked_array', 'masked_equal', 'masked_greater',
@@ -77,7 +77,7 @@ __all__ = [
'maximum_fill_value', 'mean', 'min', 'minimum', 'minimum_fill_value',
'mod', 'multiply', 'mvoid', 'ndim', 'negative', 'nomask', 'nonzero',
'not_equal', 'ones', 'outer', 'outerproduct', 'power', 'prod',
- 'product', 'ptp', 'put', 'putmask', 'rank', 'ravel', 'remainder',
+ 'product', 'ptp', 'put', 'putmask', 'ravel', 'remainder',
'repeat', 'reshape', 'resize', 'right_shift', 'round', 'round_',
'set_fill_value', 'shape', 'sin', 'sinh', 'size', 'soften_mask',
'sometrue', 'sort', 'sqrt', 'squeeze', 'std', 'subtract', 'sum',
@@ -4455,7 +4455,7 @@ class MaskedArray(ndarray):
if m is nomask:
# compare to _count_reduce_items in _methods.py
- if self.shape is ():
+ if self.shape == ():
if axis not in (None, 0):
raise np.AxisError(axis=axis, ndim=self.ndim)
return 1
@@ -5887,7 +5887,6 @@ class MaskedArray(ndarray):
return out[()]
# Array methods
- clip = _arraymethod('clip', onmask=False)
copy = _arraymethod('copy')
diagonal = _arraymethod('diagonal')
flatten = _arraymethod('flatten')
@@ -7099,23 +7098,6 @@ def resize(x, new_shape):
return result
-def rank(obj):
- """
- maskedarray version of the numpy function.
-
- .. note::
- Deprecated since 1.10.0
-
- """
- # 2015-04-12, 1.10.0
- warnings.warn(
- "`rank` is deprecated; use the `ndim` function instead. ",
- np.VisibleDeprecationWarning, stacklevel=2)
- return np.ndim(getdata(obj))
-
-rank.__doc__ = np.rank.__doc__
-
-
def ndim(obj):
"""
maskedarray version of the numpy function.
@@ -7904,93 +7886,6 @@ def _pickle_warn(method):
stacklevel=3)
-def dump(a, F):
- """
- Pickle a masked array to a file.
-
- This is a wrapper around ``cPickle.dump``.
-
- Parameters
- ----------
- a : MaskedArray
- The array to be pickled.
- F : str or file-like object
- The file to pickle `a` to. If a string, the full path to the file.
-
- """
- _pickle_warn('dump')
- if not hasattr(F, 'readline'):
- with open(F, 'w') as F:
- pickle.dump(a, F)
- else:
- pickle.dump(a, F)
-
-
-def dumps(a):
- """
- Return a string corresponding to the pickling of a masked array.
-
- This is a wrapper around ``cPickle.dumps``.
-
- Parameters
- ----------
- a : MaskedArray
- The array for which the string representation of the pickle is
- returned.
-
- """
- _pickle_warn('dumps')
- return pickle.dumps(a)
-
-
-def load(F):
- """
- Wrapper around ``cPickle.load`` which accepts either a file-like object
- or a filename.
-
- Parameters
- ----------
- F : str or file
- The file or file name to load.
-
- See Also
- --------
- dump : Pickle an array
-
- Notes
- -----
- This is different from `numpy.load`, which does not use cPickle but loads
- the NumPy binary .npy format.
-
- """
- _pickle_warn('load')
- if not hasattr(F, 'readline'):
- with open(F, 'r') as F:
- return pickle.load(F)
- else:
- return pickle.load(F)
-
-
-def loads(strg):
- """
- Load a pickle from the current string.
-
- The result of ``cPickle.loads(strg)`` is returned.
-
- Parameters
- ----------
- strg : str
- The string to load.
-
- See Also
- --------
- dumps : Return a string corresponding to the pickling of a masked array.
-
- """
- _pickle_warn('loads')
- return pickle.loads(strg)
-
-
def fromfile(file, dtype=float, count=-1, sep=''):
raise NotImplementedError(
"fromfile() not yet implemented for a MaskedArray.")
diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py
index 639b3dd1f..de1aa3af8 100644
--- a/numpy/ma/extras.py
+++ b/numpy/ma/extras.py
@@ -549,8 +549,11 @@ def average(a, axis=None, weights=None, returned=False):
The weights array can either be 1-D (in which case its length must be
the size of `a` along the given axis) or of the same shape as `a`.
If ``weights=None``, then all data in `a` are assumed to have a
- weight equal to one. If `weights` is complex, the imaginary parts
- are ignored.
+ weight equal to one. The 1-D calculation is::
+
+ avg = sum(a * weights) / sum(weights)
+
+ The only constraint on `weights` is that `sum(weights)` must not be 0.
returned : bool, optional
Flag indicating whether a tuple ``(result, sum of weights)``
should be returned as output (True), or just the result (False).
diff --git a/numpy/ma/mrecords.py b/numpy/ma/mrecords.py
index 931a7e8b9..826fb0f64 100644
--- a/numpy/ma/mrecords.py
+++ b/numpy/ma/mrecords.py
@@ -208,7 +208,7 @@ class MaskedRecords(MaskedArray, object):
_localdict = ndarray.__getattribute__(self, '__dict__')
_data = ndarray.view(self, _localdict['_baseclass'])
obj = _data.getfield(*res)
- if obj.dtype.fields:
+ if obj.dtype.names is not None:
raise NotImplementedError("MaskedRecords is currently limited to"
"simple records.")
# Get some special attributes
diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py
index 9fe550ef8..b72ce56aa 100644
--- a/numpy/ma/tests/test_core.py
+++ b/numpy/ma/tests/test_core.py
@@ -3035,6 +3035,13 @@ class TestMaskedArrayMethods(object):
assert_equal(clipped._data, x.clip(2, 8))
assert_equal(clipped._data, mx._data.clip(2, 8))
+ def test_clip_out(self):
+ # gh-14140
+ a = np.arange(10)
+ m = np.ma.MaskedArray(a, mask=[0, 1] * 5)
+ m.clip(0, 5, out=m)
+ assert_equal(m.mask, [0, 1] * 5)
+
def test_compress(self):
# test compress
a = masked_array([1., 2., 3., 4., 5.], fill_value=9999)
diff --git a/numpy/ma/version.py b/numpy/ma/version.py
deleted file mode 100644
index a2c5c42a8..000000000
--- a/numpy/ma/version.py
+++ /dev/null
@@ -1,14 +0,0 @@
-"""Version number
-
-"""
-from __future__ import division, absolute_import, print_function
-
-version = '1.00'
-release = False
-
-if not release:
- from . import core
- from . import extras
- revision = [core.__revision__.split(':')[-1][:-1].strip(),
- extras.__revision__.split(':')[-1][:-1].strip(),]
- version += '.dev%04i' % max([int(rev) for rev in revision])
diff --git a/numpy/matlib.py b/numpy/matlib.py
index 9e115943a..604ef470b 100644
--- a/numpy/matlib.py
+++ b/numpy/matlib.py
@@ -2,7 +2,7 @@ from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.matrixlib.defmatrix import matrix, asmatrix
-# need * as we're copying the numpy namespace
+# need * as we're copying the numpy namespace (FIXME: this makes little sense)
from numpy import *
__version__ = np.__version__
diff --git a/numpy/polynomial/polyutils.py b/numpy/polynomial/polyutils.py
index a9059f522..35b24d1ab 100644
--- a/numpy/polynomial/polyutils.py
+++ b/numpy/polynomial/polyutils.py
@@ -426,10 +426,7 @@ def _vander2d(vander_f, x, y, deg):
x, y, deg :
See the ``<type>vander2d`` functions for more detail
"""
- degx, degy = [
- _deprecate_as_int(d, "degrees")
- for d in deg
- ]
+ degx, degy = deg
x, y = np.array((x, y), copy=False) + 0.0
vx = vander_f(x, degx)
@@ -449,10 +446,7 @@ def _vander3d(vander_f, x, y, z, deg):
x, y, z, deg :
See the ``<type>vander3d`` functions for more detail
"""
- degx, degy, degz = [
- _deprecate_as_int(d, "degrees")
- for d in deg
- ]
+ degx, degy, degz = deg
x, y, z = np.array((x, y, z), copy=False) + 0.0
vx = vander_f(x, degx)
diff --git a/numpy/random/__init__.py b/numpy/random/__init__.py
index 2d495d67e..f7c248451 100644
--- a/numpy/random/__init__.py
+++ b/numpy/random/__init__.py
@@ -177,7 +177,11 @@ __all__ = [
'zipf',
]
-from . import mtrand
+# add these for module-freeze analysis (like PyInstaller)
+from . import _pickle
+from . import common
+from . import bounded_integers
+
from .mtrand import *
from .generator import Generator, default_rng
from .bit_generator import SeedSequence
diff --git a/numpy/random/_pickle.py b/numpy/random/_pickle.py
index d20a91ced..3b58f21e8 100644
--- a/numpy/random/_pickle.py
+++ b/numpy/random/_pickle.py
@@ -13,7 +13,7 @@ BitGenerators = {'MT19937': MT19937,
}
-def __generator_ctor(bit_generator_name='mt19937'):
+def __generator_ctor(bit_generator_name='MT19937'):
"""
Pickling helper function that returns a Generator object
@@ -36,7 +36,7 @@ def __generator_ctor(bit_generator_name='mt19937'):
return Generator(bit_generator())
-def __bit_generator_ctor(bit_generator_name='mt19937'):
+def __bit_generator_ctor(bit_generator_name='MT19937'):
"""
Pickling helper function that returns a bit generator object
@@ -59,7 +59,7 @@ def __bit_generator_ctor(bit_generator_name='mt19937'):
return bit_generator()
-def __randomstate_ctor(bit_generator_name='mt19937'):
+def __randomstate_ctor(bit_generator_name='MT19937'):
"""
Pickling helper function that returns a legacy RandomState-like object
diff --git a/numpy/random/bit_generator.pxd b/numpy/random/bit_generator.pxd
index 79fe69275..984033f17 100644
--- a/numpy/random/bit_generator.pxd
+++ b/numpy/random/bit_generator.pxd
@@ -1,5 +1,5 @@
-from .common cimport bitgen_t
+from .common cimport bitgen_t, uint32_t
cimport numpy as np
cdef class BitGenerator():
@@ -14,9 +14,9 @@ cdef class BitGenerator():
cdef class SeedSequence():
cdef readonly object entropy
cdef readonly tuple spawn_key
- cdef readonly int pool_size
+ cdef readonly uint32_t pool_size
cdef readonly object pool
- cdef readonly int n_children_spawned
+ cdef readonly uint32_t n_children_spawned
cdef mix_entropy(self, np.ndarray[np.npy_uint32, ndim=1] mixer,
np.ndarray[np.npy_uint32, ndim=1] entropy_array)
diff --git a/numpy/random/bit_generator.pyx b/numpy/random/bit_generator.pyx
index 6694e5e4d..eb608af6c 100644
--- a/numpy/random/bit_generator.pyx
+++ b/numpy/random/bit_generator.pyx
@@ -116,7 +116,7 @@ def _coerce_to_uint32_array(x):
Examples
--------
>>> import numpy as np
- >>> from np.random.bit_generator import _coerce_to_uint32_array
+ >>> from numpy.random.bit_generator import _coerce_to_uint32_array
>>> _coerce_to_uint32_array(12345)
array([12345], dtype=uint32)
>>> _coerce_to_uint32_array('12345')
@@ -458,6 +458,8 @@ cdef class SeedSequence():
-------
seqs : list of `SeedSequence` s
"""
+ cdef uint32_t i
+
seqs = []
for i in range(self.n_children_spawned,
self.n_children_spawned + n_children):
diff --git a/numpy/random/common.pxd b/numpy/random/common.pxd
index 2f7baa06e..ac0a94bb0 100644
--- a/numpy/random/common.pxd
+++ b/numpy/random/common.pxd
@@ -5,7 +5,7 @@ from libc.stdint cimport (uint8_t, uint16_t, uint32_t, uint64_t,
uintptr_t)
from libc.math cimport sqrt
-cdef extern from "numpy/random/bitgen.h":
+cdef extern from "src/bitgen.h":
struct bitgen:
void *state
uint64_t (*next_uint64)(void *st) nogil
diff --git a/numpy/random/common.pyx b/numpy/random/common.pyx
index 6ad5f5b21..74cd5f033 100644
--- a/numpy/random/common.pyx
+++ b/numpy/random/common.pyx
@@ -227,7 +227,7 @@ cdef check_output(object out, object dtype, object size):
raise ValueError('Supplied output array is not contiguous, writable or aligned.')
if out_array.dtype != dtype:
raise TypeError('Supplied output array has the wrong type. '
- 'Expected {0}, got {0}'.format(dtype, out_array.dtype))
+ 'Expected {0}, got {1}'.format(np.dtype(dtype), out_array.dtype))
if size is not None:
try:
tup_size = tuple(size)
diff --git a/numpy/random/entropy.pyx b/numpy/random/entropy.pyx
deleted file mode 100644
index 95bf7c177..000000000
--- a/numpy/random/entropy.pyx
+++ /dev/null
@@ -1,155 +0,0 @@
-cimport numpy as np
-import numpy as np
-
-from libc.stdint cimport uint32_t, uint64_t
-
-__all__ = ['random_entropy', 'seed_by_array']
-
-np.import_array()
-
-cdef extern from "src/splitmix64/splitmix64.h":
- cdef uint64_t splitmix64_next(uint64_t *state) nogil
-
-cdef extern from "src/entropy/entropy.h":
- cdef bint entropy_getbytes(void* dest, size_t size)
- cdef bint entropy_fallback_getbytes(void *dest, size_t size)
-
-cdef Py_ssize_t compute_numel(size):
- cdef Py_ssize_t i, n = 1
- if isinstance(size, tuple):
- for i in range(len(size)):
- n *= size[i]
- else:
- n = size
- return n
-
-
-def seed_by_array(object seed, Py_ssize_t n):
- """
- Transforms a seed array into an initial state
-
- Parameters
- ----------
- seed: ndarray, 1d, uint64
- Array to use. If seed is a scalar, promote to array.
- n : int
- Number of 64-bit unsigned integers required
-
- Notes
- -----
- Uses splitmix64 to perform the transformation
- """
- cdef uint64_t seed_copy = 0
- cdef uint64_t[::1] seed_array
- cdef uint64_t[::1] initial_state
- cdef Py_ssize_t seed_size, iter_bound
- cdef int i, loc = 0
-
- if hasattr(seed, 'squeeze'):
- seed = seed.squeeze()
- arr = np.asarray(seed)
- if arr.shape == ():
- err_msg = 'Scalar seeds must be integers between 0 and 2**64 - 1'
- if not np.isreal(arr):
- raise TypeError(err_msg)
- int_seed = int(seed)
- if int_seed != seed:
- raise TypeError(err_msg)
- if int_seed < 0 or int_seed > 2**64 - 1:
- raise ValueError(err_msg)
- seed_array = np.array([int_seed], dtype=np.uint64)
- elif issubclass(arr.dtype.type, np.inexact):
- raise TypeError('seed array must be integers')
- else:
- err_msg = "Seed values must be integers between 0 and 2**64 - 1"
- obj = np.asarray(seed).astype(np.object)
- if obj.ndim != 1:
- raise ValueError('Array-valued seeds must be 1-dimensional')
- if not np.isreal(obj).all():
- raise TypeError(err_msg)
- if ((obj > int(2**64 - 1)) | (obj < 0)).any():
- raise ValueError(err_msg)
- try:
- obj_int = obj.astype(np.uint64, casting='unsafe')
- except ValueError:
- raise ValueError(err_msg)
- if not (obj == obj_int).all():
- raise TypeError(err_msg)
- seed_array = obj_int
-
- seed_size = seed_array.shape[0]
- iter_bound = n if n > seed_size else seed_size
-
- initial_state = <np.ndarray>np.empty(n, dtype=np.uint64)
- for i in range(iter_bound):
- if i < seed_size:
- seed_copy ^= seed_array[i]
- initial_state[loc] = splitmix64_next(&seed_copy)
- loc += 1
- if loc == n:
- loc = 0
-
- return np.array(initial_state)
-
-
-def random_entropy(size=None, source='system'):
- """
- random_entropy(size=None, source='system')
-
- Read entropy from the system cryptographic provider
-
- Parameters
- ----------
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. Default is None, in which case a
- single value is returned.
- source : str {'system', 'fallback'}
- Source of entropy. 'system' uses system cryptographic pool.
- 'fallback' uses a hash of the time and process id.
-
- Returns
- -------
- entropy : scalar or array
- Entropy bits in 32-bit unsigned integers. A scalar is returned if size
- is `None`.
-
- Notes
- -----
- On Unix-like machines, reads from ``/dev/urandom``. On Windows machines
- reads from the RSA algorithm provided by the cryptographic service
- provider.
-
- This function reads from the system entropy pool and so samples are
- not reproducible. In particular, it does *NOT* make use of a
- BitGenerator, and so ``seed`` and setting ``state`` have no
- effect.
-
- Raises RuntimeError if the command fails.
- """
- cdef bint success = True
- cdef Py_ssize_t n = 0
- cdef uint32_t random = 0
- cdef uint32_t [:] randoms
-
- if source not in ('system', 'fallback'):
- raise ValueError('Unknown value in source.')
-
- if size is None:
- if source == 'system':
- success = entropy_getbytes(<void *>&random, 4)
- else:
- success = entropy_fallback_getbytes(<void *>&random, 4)
- else:
- n = compute_numel(size)
- randoms = np.zeros(n, dtype=np.uint32)
- if source == 'system':
- success = entropy_getbytes(<void *>(&randoms[0]), 4 * n)
- else:
- success = entropy_fallback_getbytes(<void *>(&randoms[0]), 4 * n)
- if not success:
- raise RuntimeError('Unable to read from system cryptographic provider')
-
- if n == 0:
- return random
- return np.asarray(randoms).reshape(size)
diff --git a/numpy/random/generator.pyx b/numpy/random/generator.pyx
index c7432d8c1..37ac57c06 100644
--- a/numpy/random/generator.pyx
+++ b/numpy/random/generator.pyx
@@ -4,6 +4,7 @@ import operator
import warnings
import numpy as np
+from numpy.core.multiarray import normalize_axis_index
from .bounded_integers import _integers_types
from .pcg64 import PCG64
@@ -3783,20 +3784,21 @@ cdef class Generator:
return diric
# Shuffling and permutations:
- def shuffle(self, object x):
+ def shuffle(self, object x, axis=0):
"""
shuffle(x)
Modify a sequence in-place by shuffling its contents.
- This function only shuffles the array along the first axis of a
- multi-dimensional array. The order of sub-arrays is changed but
- their contents remains the same.
+ The order of sub-arrays is changed but their contents remains the same.
Parameters
----------
x : array_like
The array or list to be shuffled.
+ axis : int, optional
+ The axis which `x` is shuffled along. Default is 0.
+ It is only supported on `ndarray` objects.
Returns
-------
@@ -3810,8 +3812,6 @@ cdef class Generator:
>>> arr
[1 7 5 2 9 4 3 6 0 8] # random
- Multi-dimensional arrays are only shuffled along the first axis:
-
>>> arr = np.arange(9).reshape((3, 3))
>>> rng.shuffle(arr)
>>> arr
@@ -3819,17 +3819,25 @@ cdef class Generator:
[6, 7, 8],
[0, 1, 2]])
+ >>> arr = np.arange(9).reshape((3, 3))
+ >>> rng.shuffle(arr, axis=1)
+ >>> arr
+ array([[2, 0, 1], # random
+ [5, 3, 4],
+ [8, 6, 7]])
"""
cdef:
np.npy_intp i, j, n = len(x), stride, itemsize
char* x_ptr
char* buf_ptr
+ axis = normalize_axis_index(axis, np.ndim(x))
+
if type(x) is np.ndarray and x.ndim == 1 and x.size:
# Fast, statically typed path: shuffle the underlying buffer.
# Only for non-empty, 1d objects of class ndarray (subclasses such
# as MaskedArrays may not support this approach).
- x_ptr = <char*><size_t>x.ctypes.data
+ x_ptr = <char*><size_t>np.PyArray_DATA(x)
stride = x.strides[0]
itemsize = x.dtype.itemsize
# As the array x could contain python objects we use a buffer
@@ -3837,7 +3845,7 @@ cdef class Generator:
# within the buffer and erroneously decrementing it's refcount
# when the function exits.
buf = np.empty(itemsize, dtype=np.int8) # GC'd at function exit
- buf_ptr = <char*><size_t>buf.ctypes.data
+ buf_ptr = <char*><size_t>np.PyArray_DATA(buf)
with self.lock:
# We trick gcc into providing a specialized implementation for
# the most common case, yielding a ~33% performance improvement.
@@ -3847,6 +3855,7 @@ cdef class Generator:
else:
self._shuffle_raw(n, 1, itemsize, stride, x_ptr, buf_ptr)
elif isinstance(x, np.ndarray) and x.ndim and x.size:
+ x = np.swapaxes(x, 0, axis)
buf = np.empty_like(x[0, ...])
with self.lock:
for i in reversed(range(1, n)):
@@ -3859,6 +3868,9 @@ cdef class Generator:
x[i] = buf
else:
# Untyped path.
+ if axis != 0:
+ raise NotImplementedError("Axis argument is only supported "
+ "on ndarray objects")
with self.lock:
for i in reversed(range(1, n)):
j = random_interval(&self._bitgen, i)
@@ -3914,21 +3926,20 @@ cdef class Generator:
data[j] = data[i]
data[i] = temp
- def permutation(self, object x):
+ def permutation(self, object x, axis=0):
"""
permutation(x)
Randomly permute a sequence, or return a permuted range.
- If `x` is a multi-dimensional array, it is only shuffled along its
- first index.
-
Parameters
----------
x : int or array_like
If `x` is an integer, randomly permute ``np.arange(x)``.
If `x` is an array, make a copy and shuffle the elements
randomly.
+ axis : int, optional
+ The axis which `x` is shuffled along. Default is 0.
Returns
-------
@@ -3950,6 +3961,17 @@ cdef class Generator:
[0, 1, 2],
[3, 4, 5]])
+ >>> rng.permutation("abc")
+ Traceback (most recent call last):
+ ...
+ numpy.AxisError: x must be an integer or at least 1-dimensional
+
+ >>> arr = np.arange(9).reshape((3, 3))
+ >>> rng.permutation(arr, axis=1)
+ array([[0, 2, 1], # random
+ [3, 5, 4],
+ [6, 8, 7]])
+
"""
if isinstance(x, (int, np.integer)):
arr = np.arange(x)
@@ -3958,6 +3980,8 @@ cdef class Generator:
arr = np.asarray(x)
+ axis = normalize_axis_index(axis, arr.ndim)
+
# shuffle has fast-path for 1-d
if arr.ndim == 1:
# Return a copy if same memory
@@ -3967,9 +3991,11 @@ cdef class Generator:
return arr
# Shuffle index array, dtype to ensure fast path
- idx = np.arange(arr.shape[0], dtype=np.intp)
+ idx = np.arange(arr.shape[axis], dtype=np.intp)
self.shuffle(idx)
- return arr[idx]
+ slices = [slice(None)]*arr.ndim
+ slices[axis] = idx
+ return arr[tuple(slices)]
def default_rng(seed=None):
diff --git a/numpy/random/info.py b/numpy/random/info.py
deleted file mode 100644
index b9fd7f26a..000000000
--- a/numpy/random/info.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from __future__ import division, absolute_import, print_function
-
-from .. import __doc__
-
-depends = ['core']
diff --git a/numpy/random/legacy_distributions.pxd b/numpy/random/legacy_distributions.pxd
index 7ba058054..c681388db 100644
--- a/numpy/random/legacy_distributions.pxd
+++ b/numpy/random/legacy_distributions.pxd
@@ -34,6 +34,8 @@ cdef extern from "legacy-distributions.h":
double nonc) nogil
double legacy_wald(aug_bitgen_t *aug_state, double mean, double scale) nogil
double legacy_lognormal(aug_bitgen_t *aug_state, double mean, double sigma) nogil
+ int64_t legacy_random_binomial(bitgen_t *bitgen_state, double p,
+ int64_t n, binomial_t *binomial) nogil
int64_t legacy_negative_binomial(aug_bitgen_t *aug_state, double n, double p) nogil
int64_t legacy_random_hypergeometric(bitgen_t *bitgen_state, int64_t good, int64_t bad, int64_t sample) nogil
int64_t legacy_random_logseries(bitgen_t *bitgen_state, double p) nogil
diff --git a/numpy/random/mt19937.pyx b/numpy/random/mt19937.pyx
index 49c3622f5..7d0f6cd22 100644
--- a/numpy/random/mt19937.pyx
+++ b/numpy/random/mt19937.pyx
@@ -5,7 +5,6 @@ cimport numpy as np
from .common cimport *
from .bit_generator cimport BitGenerator, SeedSequence
-from .entropy import random_entropy
__all__ = ['MT19937']
@@ -156,7 +155,8 @@ cdef class MT19937(BitGenerator):
Random seed initializing the pseudo-random number generator.
Can be an integer in [0, 2**32-1], array of integers in
[0, 2**32-1], a `SeedSequence, or ``None``. If `seed`
- is ``None``, then sample entropy for a seed.
+ is ``None``, then fresh, unpredictable entropy will be pulled from
+ the OS.
Raises
------
@@ -167,7 +167,8 @@ cdef class MT19937(BitGenerator):
with self.lock:
try:
if seed is None:
- val = random_entropy(RK_STATE_LEN)
+ seed = SeedSequence()
+ val = seed.generate_state(RK_STATE_LEN)
# MSB is 1; assuring non-zero initial array
self.rng_state.key[0] = 0x80000000UL
for i in range(1, RK_STATE_LEN):
diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx
index 46b6b3388..c469a4645 100644
--- a/numpy/random/mtrand.pyx
+++ b/numpy/random/mtrand.pyx
@@ -83,8 +83,8 @@ cdef class RandomState:
See Also
--------
Generator
- mt19937.MT19937
- Bit_Generators
+ MT19937
+ :ref:`bit_generator`
"""
cdef public object _bit_generator
@@ -3086,7 +3086,9 @@ cdef class RandomState:
for i in range(cnt):
_dp = (<double*>np.PyArray_MultiIter_DATA(it, 1))[0]
_in = (<long*>np.PyArray_MultiIter_DATA(it, 2))[0]
- (<long*>np.PyArray_MultiIter_DATA(it, 0))[0] = random_binomial(&self._bitgen, _dp, _in, &self._binomial)
+ (<long*>np.PyArray_MultiIter_DATA(it, 0))[0] = \
+ legacy_random_binomial(&self._bitgen, _dp, _in,
+ &self._binomial)
np.PyArray_MultiIter_NEXT(it)
@@ -3099,7 +3101,8 @@ cdef class RandomState:
if size is None:
with self.lock:
- return random_binomial(&self._bitgen, _dp, _in, &self._binomial)
+ return <long>legacy_random_binomial(&self._bitgen, _dp, _in,
+ &self._binomial)
randoms = <np.ndarray>np.empty(size, int)
cnt = np.PyArray_SIZE(randoms)
@@ -3107,8 +3110,8 @@ cdef class RandomState:
with self.lock, nogil:
for i in range(cnt):
- randoms_data[i] = random_binomial(&self._bitgen, _dp, _in,
- &self._binomial)
+ randoms_data[i] = legacy_random_binomial(&self._bitgen, _dp, _in,
+ &self._binomial)
return randoms
@@ -3517,7 +3520,7 @@ cdef class RandomState:
# Convert to int64, if necessary, to use int64 infrastructure
ongood = ongood.astype(np.int64)
onbad = onbad.astype(np.int64)
- onbad = onbad.astype(np.int64)
+ onsample = onsample.astype(np.int64)
out = discrete_broadcast_iii(&legacy_random_hypergeometric,&self._bitgen, size, self.lock,
ongood, 'ngood', CONS_NON_NEGATIVE,
onbad, 'nbad', CONS_NON_NEGATIVE,
@@ -4070,7 +4073,7 @@ cdef class RandomState:
# Fast, statically typed path: shuffle the underlying buffer.
# Only for non-empty, 1d objects of class ndarray (subclasses such
# as MaskedArrays may not support this approach).
- x_ptr = <char*><size_t>x.ctypes.data
+ x_ptr = <char*><size_t>np.PyArray_DATA(x)
stride = x.strides[0]
itemsize = x.dtype.itemsize
# As the array x could contain python objects we use a buffer
@@ -4078,7 +4081,7 @@ cdef class RandomState:
# within the buffer and erroneously decrementing it's refcount
# when the function exits.
buf = np.empty(itemsize, dtype=np.int8) # GC'd at function exit
- buf_ptr = <char*><size_t>buf.ctypes.data
+ buf_ptr = <char*><size_t>np.PyArray_DATA(buf)
with self.lock:
# We trick gcc into providing a specialized implementation for
# the most common case, yielding a ~33% performance improvement.
@@ -4134,6 +4137,7 @@ cdef class RandomState:
out : ndarray
Permuted sequence or array range.
+
Examples
--------
>>> np.random.permutation(10)
@@ -4149,12 +4153,15 @@ cdef class RandomState:
[3, 4, 5]])
"""
+
if isinstance(x, (int, np.integer)):
arr = np.arange(x)
self.shuffle(arr)
return arr
arr = np.asarray(x)
+ if arr.ndim < 1:
+ raise IndexError("x must be an integer or at least 1-dimensional")
# shuffle has fast-path for 1-d
if arr.ndim == 1:
diff --git a/numpy/random/setup.py b/numpy/random/setup.py
index a1bf3b83c..ce7f0565f 100644
--- a/numpy/random/setup.py
+++ b/numpy/random/setup.py
@@ -34,8 +34,6 @@ def configuration(parent_package='', top_path=None):
defs.append(('NPY_NO_DEPRECATED_API', 0))
config.add_data_dir('tests')
- config.add_data_files('common.pxd')
- config.add_data_files('bit_generator.pxd')
EXTRA_LINK_ARGS = []
# Math lib
@@ -49,8 +47,8 @@ def configuration(parent_package='', top_path=None):
elif not is_msvc:
# Some bit generators require c99
EXTRA_COMPILE_ARGS += ['-std=c99']
- INTEL_LIKE = any([val in k.lower() for k in platform.uname()
- for val in ('x86', 'i686', 'i386', 'amd64')])
+ INTEL_LIKE = any(arch in platform.machine()
+ for arch in ('x86', 'i686', 'i386', 'amd64'))
if INTEL_LIKE:
# Assumes GCC or GCC-like compiler
EXTRA_COMPILE_ARGS += ['-msse2']
@@ -61,18 +59,6 @@ def configuration(parent_package='', top_path=None):
# One can force emulated 128-bit arithmetic if one wants.
#PCG64_DEFS += [('PCG_FORCE_EMULATED_128BIT_MATH', '1')]
- config.add_extension('entropy',
- sources=['entropy.c', 'src/entropy/entropy.c'] +
- [generate_libraries],
- libraries=EXTRA_LIBRARIES,
- extra_compile_args=EXTRA_COMPILE_ARGS,
- extra_link_args=EXTRA_LINK_ARGS,
- depends=[join('src', 'splitmix64', 'splitmix.h'),
- join('src', 'entropy', 'entropy.h'),
- 'entropy.pyx',
- ],
- define_macros=defs,
- )
for gen in ['mt19937']:
# gen.pyx, src/gen/gen.c, src/gen/gen-jump.c
config.add_extension(gen,
diff --git a/numpy/core/include/numpy/random/bitgen.h b/numpy/random/src/bitgen.h
index 0adaaf2ee..0adaaf2ee 100644
--- a/numpy/core/include/numpy/random/bitgen.h
+++ b/numpy/random/src/bitgen.h
diff --git a/numpy/random/src/distributions/distributions.c b/numpy/random/src/distributions/distributions.c
index 65257ecbf..1244ffe65 100644
--- a/numpy/random/src/distributions/distributions.c
+++ b/numpy/random/src/distributions/distributions.c
@@ -901,8 +901,8 @@ RAND_INT_TYPE random_binomial_inversion(bitgen_t *bitgen_state, RAND_INT_TYPE n,
return X;
}
-RAND_INT_TYPE random_binomial(bitgen_t *bitgen_state, double p, RAND_INT_TYPE n,
- binomial_t *binomial) {
+int64_t random_binomial(bitgen_t *bitgen_state, double p, int64_t n,
+ binomial_t *binomial) {
double q;
if ((n == 0LL) || (p == 0.0f))
@@ -1478,7 +1478,7 @@ uint64_t random_bounded_uint64(bitgen_t *bitgen_state, uint64_t off,
uint64_t rng, uint64_t mask, bool use_masked) {
if (rng == 0) {
return off;
- } else if (rng < 0xFFFFFFFFUL) {
+ } else if (rng <= 0xFFFFFFFFUL) {
/* Call 32-bit generator if range in 32-bit. */
if (use_masked) {
return off + buffered_bounded_masked_uint32(bitgen_state, rng, mask, NULL,
@@ -1592,7 +1592,7 @@ void random_bounded_uint64_fill(bitgen_t *bitgen_state, uint64_t off,
for (i = 0; i < cnt; i++) {
out[i] = off;
}
- } else if (rng < 0xFFFFFFFFUL) {
+ } else if (rng <= 0xFFFFFFFFUL) {
uint32_t buf = 0;
int bcnt = 0;
diff --git a/numpy/random/src/distributions/distributions.h b/numpy/random/src/distributions/distributions.h
index c8cdfd20f..2a6b2a045 100644
--- a/numpy/random/src/distributions/distributions.h
+++ b/numpy/random/src/distributions/distributions.h
@@ -1,15 +1,14 @@
#ifndef _RANDOMDGEN__DISTRIBUTIONS_H_
#define _RANDOMDGEN__DISTRIBUTIONS_H_
-#pragma once
+#include "Python.h"
+#include "numpy/npy_common.h"
#include <stddef.h>
#include <stdbool.h>
#include <stdint.h>
-#include "Python.h"
-#include "numpy/npy_common.h"
#include "numpy/npy_math.h"
-#include "numpy/random/bitgen.h"
+#include "src/bitgen.h"
/*
* RAND_INT_TYPE is used to share integer generators with RandomState which
@@ -43,11 +42,11 @@
typedef struct s_binomial_t {
int has_binomial; /* !=0: following parameters initialized for binomial */
double psave;
- int64_t nsave;
+ RAND_INT_TYPE nsave;
double r;
double q;
double fm;
- int64_t m;
+ RAND_INT_TYPE m;
double p1;
double xm;
double xl;
@@ -148,8 +147,18 @@ DECLDIR double random_triangular(bitgen_t *bitgen_state, double left, double mod
DECLDIR RAND_INT_TYPE random_poisson(bitgen_t *bitgen_state, double lam);
DECLDIR RAND_INT_TYPE random_negative_binomial(bitgen_t *bitgen_state, double n,
double p);
-DECLDIR RAND_INT_TYPE random_binomial(bitgen_t *bitgen_state, double p, RAND_INT_TYPE n,
- binomial_t *binomial);
+
+DECLDIR RAND_INT_TYPE random_binomial_btpe(bitgen_t *bitgen_state,
+ RAND_INT_TYPE n,
+ double p,
+ binomial_t *binomial);
+DECLDIR RAND_INT_TYPE random_binomial_inversion(bitgen_t *bitgen_state,
+ RAND_INT_TYPE n,
+ double p,
+ binomial_t *binomial);
+DECLDIR int64_t random_binomial(bitgen_t *bitgen_state, double p,
+ int64_t n, binomial_t *binomial);
+
DECLDIR RAND_INT_TYPE random_logseries(bitgen_t *bitgen_state, double p);
DECLDIR RAND_INT_TYPE random_geometric_search(bitgen_t *bitgen_state, double p);
DECLDIR RAND_INT_TYPE random_geometric_inversion(bitgen_t *bitgen_state, double p);
diff --git a/numpy/random/src/distributions/random_hypergeometric.c b/numpy/random/src/distributions/random_hypergeometric.c
index 59a3a4b9b..94dc6380f 100644
--- a/numpy/random/src/distributions/random_hypergeometric.c
+++ b/numpy/random/src/distributions/random_hypergeometric.c
@@ -1,6 +1,6 @@
-#include <stdint.h>
#include "distributions.h"
#include "logfactorial.h"
+#include <stdint.h>
/*
* Generate a sample from the hypergeometric distribution.
diff --git a/numpy/random/src/entropy/entropy.c b/numpy/random/src/entropy/entropy.c
deleted file mode 100644
index eaca37a9c..000000000
--- a/numpy/random/src/entropy/entropy.c
+++ /dev/null
@@ -1,114 +0,0 @@
-#include <stddef.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-#include "entropy.h"
-#ifdef _WIN32
-/* Windows */
-#include <sys/timeb.h>
-#include <time.h>
-#include <windows.h>
-
-#include <wincrypt.h>
-#else
-/* Unix */
-#include <sys/time.h>
-#include <time.h>
-#include <unistd.h>
-#include <fcntl.h>
-#endif
-
-bool entropy_getbytes(void *dest, size_t size) {
-#ifndef _WIN32
-
- int fd = open("/dev/urandom", O_RDONLY);
- if (fd < 0)
- return false;
- ssize_t sz = read(fd, dest, size);
- if ((sz < 0) || ((size_t)sz < size))
- return false;
- return close(fd) == 0;
-
-#else
-
- HCRYPTPROV hCryptProv;
- BOOL done;
-
- if (!CryptAcquireContext(&hCryptProv, NULL, NULL, PROV_RSA_FULL,
- CRYPT_VERIFYCONTEXT) ||
- !hCryptProv) {
- return true;
- }
- done = CryptGenRandom(hCryptProv, (DWORD)size, (unsigned char *)dest);
- CryptReleaseContext(hCryptProv, 0);
- if (!done) {
- return false;
- }
-
- return true;
-#endif
-}
-
-/* Thomas Wang 32/64 bits integer hash function */
-uint32_t entropy_hash_32(uint32_t key) {
- key += ~(key << 15);
- key ^= (key >> 10);
- key += (key << 3);
- key ^= (key >> 6);
- key += ~(key << 11);
- key ^= (key >> 16);
- return key;
-}
-
-uint64_t entropy_hash_64(uint64_t key) {
- key = (~key) + (key << 21); // key = (key << 21) - key - 1;
- key = key ^ (key >> 24);
- key = (key + (key << 3)) + (key << 8); // key * 265
- key = key ^ (key >> 14);
- key = (key + (key << 2)) + (key << 4); // key * 21
- key = key ^ (key >> 28);
- key = key + (key << 31);
- return key;
-}
-
-uint32_t entropy_randombytes(void) {
-
-#ifndef _WIN32
- struct timeval tv;
- gettimeofday(&tv, NULL);
- return entropy_hash_32(getpid()) ^ entropy_hash_32(tv.tv_sec) ^
- entropy_hash_32(tv.tv_usec) ^ entropy_hash_32(clock());
-#else
- uint32_t out = 0;
- int64_t counter;
- struct _timeb tv;
- _ftime_s(&tv);
- out = entropy_hash_32(GetCurrentProcessId()) ^
- entropy_hash_32((uint32_t)tv.time) ^ entropy_hash_32(tv.millitm) ^
- entropy_hash_32(clock());
- if (QueryPerformanceCounter((LARGE_INTEGER *)&counter) != 0)
- out ^= entropy_hash_32((uint32_t)(counter & 0xffffffff));
- return out;
-#endif
-}
-
-bool entropy_fallback_getbytes(void *dest, size_t size) {
- int hashes = (int)size;
- uint32_t *hash = malloc(hashes * sizeof(uint32_t));
- int i;
- for (i = 0; i < hashes; i++) {
- hash[i] = entropy_randombytes();
- }
- memcpy(dest, (void *)hash, size);
- free(hash);
- return true;
-}
-
-void entropy_fill(void *dest, size_t size) {
- bool success;
- success = entropy_getbytes(dest, size);
- if (!success) {
- entropy_fallback_getbytes(dest, size);
- }
-}
diff --git a/numpy/random/src/entropy/entropy.h b/numpy/random/src/entropy/entropy.h
deleted file mode 100644
index f00caf61d..000000000
--- a/numpy/random/src/entropy/entropy.h
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef _RANDOMDGEN__ENTROPY_H_
-#define _RANDOMDGEN__ENTROPY_H_
-
-#include <stddef.h>
-#include <stdbool.h>
-#include <stdint.h>
-
-extern void entropy_fill(void *dest, size_t size);
-
-extern bool entropy_getbytes(void *dest, size_t size);
-
-extern bool entropy_fallback_getbytes(void *dest, size_t size);
-
-#endif
diff --git a/numpy/random/src/legacy/legacy-distributions.c b/numpy/random/src/legacy/legacy-distributions.c
index 4741a0352..684b3d762 100644
--- a/numpy/random/src/legacy/legacy-distributions.c
+++ b/numpy/random/src/legacy/legacy-distributions.c
@@ -215,6 +215,37 @@ double legacy_exponential(aug_bitgen_t *aug_state, double scale) {
}
+static RAND_INT_TYPE legacy_random_binomial_original(bitgen_t *bitgen_state,
+ double p,
+ RAND_INT_TYPE n,
+ binomial_t *binomial) {
+ double q;
+
+ if (p <= 0.5) {
+ if (p * n <= 30.0) {
+ return random_binomial_inversion(bitgen_state, n, p, binomial);
+ } else {
+ return random_binomial_btpe(bitgen_state, n, p, binomial);
+ }
+ } else {
+ q = 1.0 - p;
+ if (q * n <= 30.0) {
+ return n - random_binomial_inversion(bitgen_state, n, q, binomial);
+ } else {
+ return n - random_binomial_btpe(bitgen_state, n, q, binomial);
+ }
+ }
+}
+
+
+int64_t legacy_random_binomial(bitgen_t *bitgen_state, double p,
+ int64_t n, binomial_t *binomial) {
+ return (int64_t) legacy_random_binomial_original(bitgen_state, p,
+ (RAND_INT_TYPE) n,
+ binomial);
+}
+
+
static RAND_INT_TYPE random_hypergeometric_hyp(bitgen_t *bitgen_state,
RAND_INT_TYPE good,
RAND_INT_TYPE bad,
diff --git a/numpy/random/src/legacy/legacy-distributions.h b/numpy/random/src/legacy/legacy-distributions.h
index 005c4e5d2..4bc15d58e 100644
--- a/numpy/random/src/legacy/legacy-distributions.h
+++ b/numpy/random/src/legacy/legacy-distributions.h
@@ -16,26 +16,23 @@ extern double legacy_pareto(aug_bitgen_t *aug_state, double a);
extern double legacy_weibull(aug_bitgen_t *aug_state, double a);
extern double legacy_power(aug_bitgen_t *aug_state, double a);
extern double legacy_gamma(aug_bitgen_t *aug_state, double shape, double scale);
-extern double legacy_pareto(aug_bitgen_t *aug_state, double a);
-extern double legacy_weibull(aug_bitgen_t *aug_state, double a);
extern double legacy_chisquare(aug_bitgen_t *aug_state, double df);
extern double legacy_noncentral_chisquare(aug_bitgen_t *aug_state, double df,
double nonc);
-
extern double legacy_noncentral_f(aug_bitgen_t *aug_state, double dfnum,
double dfden, double nonc);
extern double legacy_wald(aug_bitgen_t *aug_state, double mean, double scale);
extern double legacy_lognormal(aug_bitgen_t *aug_state, double mean,
double sigma);
extern double legacy_standard_t(aug_bitgen_t *aug_state, double df);
-extern int64_t legacy_negative_binomial(aug_bitgen_t *aug_state, double n,
- double p);
extern double legacy_standard_cauchy(aug_bitgen_t *state);
extern double legacy_beta(aug_bitgen_t *aug_state, double a, double b);
extern double legacy_f(aug_bitgen_t *aug_state, double dfnum, double dfden);
extern double legacy_normal(aug_bitgen_t *aug_state, double loc, double scale);
extern double legacy_standard_gamma(aug_bitgen_t *aug_state, double shape);
extern double legacy_exponential(aug_bitgen_t *aug_state, double scale);
+extern int64_t legacy_random_binomial(bitgen_t *bitgen_state, double p,
+ int64_t n, binomial_t *binomial);
extern int64_t legacy_negative_binomial(aug_bitgen_t *aug_state, double n,
double p);
extern int64_t legacy_random_hypergeometric(bitgen_t *bitgen_state,
diff --git a/numpy/random/src/philox/philox.h b/numpy/random/src/philox/philox.h
index 309d89eae..c72424a97 100644
--- a/numpy/random/src/philox/philox.h
+++ b/numpy/random/src/philox/philox.h
@@ -1,8 +1,8 @@
#ifndef _RANDOMDGEN__PHILOX_H_
#define _RANDOMDGEN__PHILOX_H_
-#include <inttypes.h>
#include "numpy/npy_common.h"
+#include <inttypes.h>
#define PHILOX_BUFFER_SIZE 4L
diff --git a/numpy/random/src/sfc64/sfc64.h b/numpy/random/src/sfc64/sfc64.h
index 6674ae69c..75c4118d3 100644
--- a/numpy/random/src/sfc64/sfc64.h
+++ b/numpy/random/src/sfc64/sfc64.h
@@ -1,11 +1,11 @@
#ifndef _RANDOMDGEN__SFC64_H_
#define _RANDOMDGEN__SFC64_H_
+#include "numpy/npy_common.h"
#include <inttypes.h>
#ifdef _WIN32
#include <stdlib.h>
#endif
-#include "numpy/npy_common.h"
typedef struct s_sfc64_state {
uint64_t s[4];
diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py
index a962fe84e..20bc10cd0 100644
--- a/numpy/random/tests/test_generator_mt19937.py
+++ b/numpy/random/tests/test_generator_mt19937.py
@@ -732,6 +732,20 @@ class TestRandomDist(object):
desired = conv([4, 1, 9, 8, 0, 5, 3, 6, 2, 7])
assert_array_equal(actual, desired)
+ def test_shuffle_custom_axis(self):
+ random = Generator(MT19937(self.seed))
+ actual = np.arange(16).reshape((4, 4))
+ random.shuffle(actual, axis=1)
+ desired = np.array([[ 0, 3, 1, 2],
+ [ 4, 7, 5, 6],
+ [ 8, 11, 9, 10],
+ [12, 15, 13, 14]])
+ assert_array_equal(actual, desired)
+ random = Generator(MT19937(self.seed))
+ actual = np.arange(16).reshape((4, 4))
+ random.shuffle(actual, axis=-1)
+ assert_array_equal(actual, desired)
+
def test_shuffle_masked(self):
# gh-3263
a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1)
@@ -746,6 +760,16 @@ class TestRandomDist(object):
assert_equal(
sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))
+ def test_shuffle_exceptions(self):
+ random = Generator(MT19937(self.seed))
+ arr = np.arange(10)
+ assert_raises(np.AxisError, random.shuffle, arr, 1)
+ arr = np.arange(9).reshape((3, 3))
+ assert_raises(np.AxisError, random.shuffle, arr, 3)
+ assert_raises(TypeError, random.shuffle, arr, slice(1, 2, None))
+ arr = [[1, 2, 3], [4, 5, 6]]
+ assert_raises(NotImplementedError, random.shuffle, arr, 1)
+
def test_permutation(self):
random = Generator(MT19937(self.seed))
alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
@@ -757,6 +781,40 @@ class TestRandomDist(object):
arr_2d = np.atleast_2d([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]).T
actual = random.permutation(arr_2d)
assert_array_equal(actual, np.atleast_2d(desired).T)
+
+ bad_x_str = "abcd"
+ assert_raises(np.AxisError, random.permutation, bad_x_str)
+
+ bad_x_float = 1.2
+ assert_raises(np.AxisError, random.permutation, bad_x_float)
+
+ random = Generator(MT19937(self.seed))
+ integer_val = 10
+ desired = [3, 0, 8, 7, 9, 4, 2, 5, 1, 6]
+
+ actual = random.permutation(integer_val)
+ assert_array_equal(actual, desired)
+
+ def test_permutation_custom_axis(self):
+ a = np.arange(16).reshape((4, 4))
+ desired = np.array([[ 0, 3, 1, 2],
+ [ 4, 7, 5, 6],
+ [ 8, 11, 9, 10],
+ [12, 15, 13, 14]])
+ random = Generator(MT19937(self.seed))
+ actual = random.permutation(a, axis=1)
+ assert_array_equal(actual, desired)
+ random = Generator(MT19937(self.seed))
+ actual = random.permutation(a, axis=-1)
+ assert_array_equal(actual, desired)
+
+ def test_permutation_exceptions(self):
+ random = Generator(MT19937(self.seed))
+ arr = np.arange(10)
+ assert_raises(np.AxisError, random.permutation, arr, 1)
+ arr = np.arange(9).reshape((3, 3))
+ assert_raises(np.AxisError, random.permutation, arr, 3)
+ assert_raises(TypeError, random.permutation, arr, slice(1, 2, None))
def test_beta(self):
random = Generator(MT19937(self.seed))
diff --git a/numpy/random/tests/test_randomstate.py b/numpy/random/tests/test_randomstate.py
index 3b5a279a3..a0edc5c23 100644
--- a/numpy/random/tests/test_randomstate.py
+++ b/numpy/random/tests/test_randomstate.py
@@ -686,6 +686,21 @@ class TestRandomDist(object):
actual = random.permutation(arr_2d)
assert_array_equal(actual, np.atleast_2d(desired).T)
+ random.seed(self.seed)
+ bad_x_str = "abcd"
+ assert_raises(IndexError, random.permutation, bad_x_str)
+
+ random.seed(self.seed)
+ bad_x_float = 1.2
+ assert_raises(IndexError, random.permutation, bad_x_float)
+
+ integer_val = 10
+ desired = [9, 0, 8, 5, 1, 3, 4, 7, 6, 2]
+
+ random.seed(self.seed)
+ actual = random.permutation(integer_val)
+ assert_array_equal(actual, desired)
+
def test_beta(self):
random.seed(self.seed)
actual = random.beta(.1, .9, size=(3, 2))
diff --git a/numpy/random/tests/test_randomstate_regression.py b/numpy/random/tests/test_randomstate_regression.py
index 29870534a..edf32ea97 100644
--- a/numpy/random/tests/test_randomstate_regression.py
+++ b/numpy/random/tests/test_randomstate_regression.py
@@ -181,3 +181,30 @@ class TestRegression(object):
assert c.dtype == np.dtype(int)
c = np.random.choice(10, replace=False, size=2)
assert c.dtype == np.dtype(int)
+
+ @pytest.mark.skipif(np.iinfo('l').max < 2**32,
+ reason='Cannot test with 32-bit C long')
+ def test_randint_117(self):
+ # GH 14189
+ random.seed(0)
+ expected = np.array([2357136044, 2546248239, 3071714933, 3626093760,
+ 2588848963, 3684848379, 2340255427, 3638918503,
+ 1819583497, 2678185683], dtype='int64')
+ actual = random.randint(2**32, size=10)
+ assert_array_equal(actual, expected)
+
+ def test_p_zero_stream(self):
+ # Regression test for gh-14522. Ensure that future versions
+ # generate the same variates as version 1.16.
+ np.random.seed(12345)
+ assert_array_equal(random.binomial(1, [0, 0.25, 0.5, 0.75, 1]),
+ [0, 0, 0, 1, 1])
+
+ def test_n_zero_stream(self):
+ # Regression test for gh-14522. Ensure that future versions
+ # generate the same variates as version 1.16.
+ np.random.seed(8675309)
+ expected = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [3, 4, 2, 3, 3, 1, 5, 3, 1, 3]])
+ assert_array_equal(random.binomial([[0], [10]], 0.25, size=(2, 10)),
+ expected)
diff --git a/numpy/random/tests/test_smoke.py b/numpy/random/tests/test_smoke.py
index 84d261e5e..6e641b5f4 100644
--- a/numpy/random/tests/test_smoke.py
+++ b/numpy/random/tests/test_smoke.py
@@ -5,7 +5,7 @@ from functools import partial
import numpy as np
import pytest
from numpy.testing import assert_equal, assert_, assert_array_equal
-from numpy.random import (Generator, MT19937, PCG64, Philox, SFC64, entropy)
+from numpy.random import (Generator, MT19937, PCG64, Philox, SFC64)
@pytest.fixture(scope='module',
params=(np.bool, np.int8, np.int16, np.int32, np.int64,
@@ -806,23 +806,3 @@ class TestDefaultRNG(RNG):
np.random.default_rng(-1)
with pytest.raises(ValueError):
np.random.default_rng([12345, -1])
-
-
-class TestEntropy(object):
- def test_entropy(self):
- e1 = entropy.random_entropy()
- e2 = entropy.random_entropy()
- assert_((e1 != e2))
- e1 = entropy.random_entropy(10)
- e2 = entropy.random_entropy(10)
- assert_((e1 != e2).all())
- e1 = entropy.random_entropy(10, source='system')
- e2 = entropy.random_entropy(10, source='system')
- assert_((e1 != e2).all())
-
- def test_fallback(self):
- e1 = entropy.random_entropy(source='fallback')
- time.sleep(0.1)
- e2 = entropy.random_entropy(source='fallback')
- assert_((e1 != e2))
-
diff --git a/numpy/testing/_private/parameterized.py b/numpy/testing/_private/parameterized.py
index a5fa4fb5e..489d8e09a 100644
--- a/numpy/testing/_private/parameterized.py
+++ b/numpy/testing/_private/parameterized.py
@@ -45,11 +45,18 @@ except ImportError:
from unittest import TestCase
-PY3 = sys.version_info[0] == 3
PY2 = sys.version_info[0] == 2
-if PY3:
+if PY2:
+ from types import InstanceType
+ lzip = zip
+ text_type = unicode
+ bytes_type = str
+ string_types = basestring,
+ def make_method(func, instance, type):
+ return MethodType(func, instance, type)
+else:
# Python 3 doesn't have an InstanceType, so just use a dummy type.
class InstanceType():
pass
@@ -61,14 +68,6 @@ if PY3:
if instance is None:
return func
return MethodType(func, instance)
-else:
- from types import InstanceType
- lzip = zip
- text_type = unicode
- bytes_type = str
- string_types = basestring,
- def make_method(func, instance, type):
- return MethodType(func, instance, type)
_param = namedtuple("param", "args kwargs")
diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py
index 87e66e06f..8a31fcf15 100644
--- a/numpy/testing/_private/utils.py
+++ b/numpy/testing/_private/utils.py
@@ -21,7 +21,6 @@ import pprint
from numpy.core import(
intp, float32, empty, arange, array_repr, ndarray, isnat, array)
-from numpy.lib.utils import deprecate
if sys.version_info[0] >= 3:
from io import StringIO
@@ -33,7 +32,7 @@ __all__ = [
'assert_array_equal', 'assert_array_less', 'assert_string_equal',
'assert_array_almost_equal', 'assert_raises', 'build_err_msg',
'decorate_methods', 'jiffies', 'memusage', 'print_assert_equal',
- 'raises', 'rand', 'rundocs', 'runstring', 'verbose', 'measure',
+ 'raises', 'rundocs', 'runstring', 'verbose', 'measure',
'assert_', 'assert_array_almost_equal_nulp', 'assert_raises_regex',
'assert_array_max_ulp', 'assert_warns', 'assert_no_warnings',
'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings',
@@ -154,22 +153,6 @@ def gisinf(x):
return st
-@deprecate(message="numpy.testing.rand is deprecated in numpy 1.11. "
- "Use numpy.random.rand instead.")
-def rand(*args):
- """Returns an array of random numbers with the given shape.
-
- This only uses the standard library, so it is useful for testing purposes.
- """
- import random
- from numpy.core import zeros, float64
- results = zeros(args, float64)
- f = results.flat
- for i in range(len(f)):
- f[i] = random.random()
- return results
-
-
if os.name == 'nt':
# Code "stolen" from enthought/debug/memusage.py
def GetPerformanceAttributes(object, counter, instance=None,
@@ -703,7 +686,7 @@ def assert_array_compare(comparison, x, y, err_msg='', verbose=True,
header='', precision=6, equal_nan=True,
equal_inf=True):
__tracebackhide__ = True # Hide traceback for py.test
- from numpy.core import array, array2string, isnan, inf, bool_, errstate
+ from numpy.core import array, array2string, isnan, inf, bool_, errstate, all, max, object_
x = array(x, copy=False, subok=True)
y = array(y, copy=False, subok=True)
@@ -804,15 +787,19 @@ def assert_array_compare(comparison, x, y, err_msg='', verbose=True,
# do not trigger a failure (np.ma.masked != True evaluates as
# np.ma.masked, which is falsy).
if cond != True:
- mismatch = 100. * (reduced.size - reduced.sum(dtype=intp)) / ox.size
- remarks = ['Mismatch: {:.3g}%'.format(mismatch)]
+ n_mismatch = reduced.size - reduced.sum(dtype=intp)
+ n_elements = flagged.size if flagged.ndim != 0 else reduced.size
+ percent_mismatch = 100 * n_mismatch / n_elements
+ remarks = [
+ 'Mismatched elements: {} / {} ({:.3g}%)'.format(
+ n_mismatch, n_elements, percent_mismatch)]
with errstate(invalid='ignore', divide='ignore'):
# ignore errors for non-numeric types
with contextlib.suppress(TypeError):
error = abs(x - y)
- max_abs_error = error.max()
- if error.dtype == 'object':
+ max_abs_error = max(error)
+ if getattr(error, 'dtype', object_) == object_:
remarks.append('Max absolute difference: '
+ str(max_abs_error))
else:
@@ -821,8 +808,13 @@ def assert_array_compare(comparison, x, y, err_msg='', verbose=True,
# note: this definition of relative error matches that one
# used by assert_allclose (found in np.isclose)
- max_rel_error = (error / abs(y)).max()
- if error.dtype == 'object':
+ # Filter values where the divisor would be zero
+ nonzero = bool_(y != 0)
+ if all(~nonzero):
+ max_rel_error = array(inf)
+ else:
+ max_rel_error = max(error[nonzero] / abs(y[nonzero]))
+ if getattr(error, 'dtype', object_) == object_:
remarks.append('Max relative difference: '
+ str(max_rel_error))
else:
diff --git a/numpy/testing/decorators.py b/numpy/testing/decorators.py
deleted file mode 100644
index bf78be500..000000000
--- a/numpy/testing/decorators.py
+++ /dev/null
@@ -1,15 +0,0 @@
-"""
-Back compatibility decorators module. It will import the appropriate
-set of tools
-
-"""
-from __future__ import division, absolute_import, print_function
-
-import warnings
-
-# 2018-04-04, numpy 1.15.0
-warnings.warn("Importing from numpy.testing.decorators is deprecated "
- "since numpy 1.15.0, import from numpy.testing instead.",
- DeprecationWarning, stacklevel=2)
-
-from ._private.decorators import *
diff --git a/numpy/testing/noseclasses.py b/numpy/testing/noseclasses.py
deleted file mode 100644
index 5748a9a0f..000000000
--- a/numpy/testing/noseclasses.py
+++ /dev/null
@@ -1,14 +0,0 @@
-"""
-Back compatibility noseclasses module. It will import the appropriate
-set of tools
-"""
-from __future__ import division, absolute_import, print_function
-
-import warnings
-
-# 2018-04-04, numpy 1.15.0
-warnings.warn("Importing from numpy.testing.noseclasses is deprecated "
- "since 1.15.0, import from numpy.testing instead",
- DeprecationWarning, stacklevel=2)
-
-from ._private.noseclasses import *
diff --git a/numpy/testing/nosetester.py b/numpy/testing/nosetester.py
deleted file mode 100644
index 2ac212eee..000000000
--- a/numpy/testing/nosetester.py
+++ /dev/null
@@ -1,19 +0,0 @@
-"""
-Back compatibility nosetester module. It will import the appropriate
-set of tools
-
-"""
-from __future__ import division, absolute_import, print_function
-
-import warnings
-
-# 2018-04-04, numpy 1.15.0
-warnings.warn("Importing from numpy.testing.nosetester is deprecated "
- "since 1.15.0, import from numpy.testing instead.",
- DeprecationWarning, stacklevel=2)
-
-from ._private.nosetester import *
-
-__all__ = ['get_package_name', 'run_module_suite', 'NoseTester',
- '_numpy_tester', 'get_package_name', 'import_nose',
- 'suppress_warnings']
diff --git a/numpy/testing/print_coercion_tables.py b/numpy/testing/print_coercion_tables.py
index 3a359f472..72b22cee1 100755
--- a/numpy/testing/print_coercion_tables.py
+++ b/numpy/testing/print_coercion_tables.py
@@ -70,22 +70,24 @@ def print_coercion_table(ntypes, inputfirstvalue, inputsecondvalue, firstarray,
print(char, end=' ')
print()
-print("can cast")
-print_cancast_table(np.typecodes['All'])
-print()
-print("In these tables, ValueError is '!', OverflowError is '@', TypeError is '#'")
-print()
-print("scalar + scalar")
-print_coercion_table(np.typecodes['All'], 0, 0, False)
-print()
-print("scalar + neg scalar")
-print_coercion_table(np.typecodes['All'], 0, -1, False)
-print()
-print("array + scalar")
-print_coercion_table(np.typecodes['All'], 0, 0, True)
-print()
-print("array + neg scalar")
-print_coercion_table(np.typecodes['All'], 0, -1, True)
-print()
-print("promote_types")
-print_coercion_table(np.typecodes['All'], 0, 0, False, True)
+
+if __name__ == '__main__':
+ print("can cast")
+ print_cancast_table(np.typecodes['All'])
+ print()
+ print("In these tables, ValueError is '!', OverflowError is '@', TypeError is '#'")
+ print()
+ print("scalar + scalar")
+ print_coercion_table(np.typecodes['All'], 0, 0, False)
+ print()
+ print("scalar + neg scalar")
+ print_coercion_table(np.typecodes['All'], 0, -1, False)
+ print()
+ print("array + scalar")
+ print_coercion_table(np.typecodes['All'], 0, 0, True)
+ print()
+ print("array + neg scalar")
+ print_coercion_table(np.typecodes['All'], 0, -1, True)
+ print()
+ print("promote_types")
+ print_coercion_table(np.typecodes['All'], 0, 0, False, True)
diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py
index bf60772d3..44f93a693 100644
--- a/numpy/testing/tests/test_utils.py
+++ b/numpy/testing/tests/test_utils.py
@@ -520,7 +520,7 @@ class TestAlmostEqual(_GenericTest):
with pytest.raises(AssertionError) as exc_info:
self._assert_func(x, y, decimal=12)
msgs = str(exc_info.value).split('\n')
- assert_equal(msgs[3], 'Mismatch: 100%')
+ assert_equal(msgs[3], 'Mismatched elements: 3 / 3 (100%)')
assert_equal(msgs[4], 'Max absolute difference: 1.e-05')
assert_equal(msgs[5], 'Max relative difference: 3.33328889e-06')
assert_equal(
@@ -536,7 +536,7 @@ class TestAlmostEqual(_GenericTest):
with pytest.raises(AssertionError) as exc_info:
self._assert_func(x, y)
msgs = str(exc_info.value).split('\n')
- assert_equal(msgs[3], 'Mismatch: 33.3%')
+ assert_equal(msgs[3], 'Mismatched elements: 1 / 3 (33.3%)')
assert_equal(msgs[4], 'Max absolute difference: 1.e-05')
assert_equal(msgs[5], 'Max relative difference: 3.33328889e-06')
assert_equal(msgs[6], ' x: array([1. , 2. , 3.00003])')
@@ -548,7 +548,7 @@ class TestAlmostEqual(_GenericTest):
with pytest.raises(AssertionError) as exc_info:
self._assert_func(x, y)
msgs = str(exc_info.value).split('\n')
- assert_equal(msgs[3], 'Mismatch: 50%')
+ assert_equal(msgs[3], 'Mismatched elements: 1 / 2 (50%)')
assert_equal(msgs[4], 'Max absolute difference: 1.')
assert_equal(msgs[5], 'Max relative difference: 1.')
assert_equal(msgs[6], ' x: array([inf, 0.])')
@@ -560,10 +560,30 @@ class TestAlmostEqual(_GenericTest):
with pytest.raises(AssertionError) as exc_info:
self._assert_func(x, y)
msgs = str(exc_info.value).split('\n')
- assert_equal(msgs[3], 'Mismatch: 100%')
+ assert_equal(msgs[3], 'Mismatched elements: 2 / 2 (100%)')
assert_equal(msgs[4], 'Max absolute difference: 2')
assert_equal(msgs[5], 'Max relative difference: inf')
+ def test_error_message_2(self):
+ """Check the message is formatted correctly when either x or y is a scalar."""
+ x = 2
+ y = np.ones(20)
+ with pytest.raises(AssertionError) as exc_info:
+ self._assert_func(x, y)
+ msgs = str(exc_info.value).split('\n')
+ assert_equal(msgs[3], 'Mismatched elements: 20 / 20 (100%)')
+ assert_equal(msgs[4], 'Max absolute difference: 1.')
+ assert_equal(msgs[5], 'Max relative difference: 1.')
+
+ y = 2
+ x = np.ones(20)
+ with pytest.raises(AssertionError) as exc_info:
+ self._assert_func(x, y)
+ msgs = str(exc_info.value).split('\n')
+ assert_equal(msgs[3], 'Mismatched elements: 20 / 20 (100%)')
+ assert_equal(msgs[4], 'Max absolute difference: 1.')
+ assert_equal(msgs[5], 'Max relative difference: 0.5')
+
def test_subclass_that_cannot_be_bool(self):
# While we cannot guarantee testing functions will always work for
# subclasses, the tests should ideally rely only on subclasses having
@@ -855,7 +875,8 @@ class TestAssertAllclose(object):
with pytest.raises(AssertionError) as exc_info:
assert_allclose(a, b)
msg = str(exc_info.value)
- assert_('Mismatch: 25%\nMax absolute difference: 1\n'
+ assert_('Mismatched elements: 1 / 4 (25%)\n'
+ 'Max absolute difference: 1\n'
'Max relative difference: 0.5' in msg)
def test_equal_nan(self):
@@ -880,6 +901,15 @@ class TestAssertAllclose(object):
assert_array_less(a, b)
assert_allclose(a, b)
+ def test_report_max_relative_error(self):
+ a = np.array([0, 1])
+ b = np.array([0, 2])
+
+ with pytest.raises(AssertionError) as exc_info:
+ assert_allclose(a, b)
+ msg = str(exc_info.value)
+ assert_('Max relative difference: 0.5' in msg)
+
class TestArrayAlmostEqualNulp(object):
diff --git a/numpy/testing/utils.py b/numpy/testing/utils.py
index 98f19e348..975f6ad5d 100644
--- a/numpy/testing/utils.py
+++ b/numpy/testing/utils.py
@@ -7,10 +7,11 @@ from __future__ import division, absolute_import, print_function
import warnings
-# 2018-04-04, numpy 1.15.0
+# 2018-04-04, numpy 1.15.0 ImportWarning
+# 2019-09-18, numpy 1.18.0 DeprecationWarning (changed)
warnings.warn("Importing from numpy.testing.utils is deprecated "
"since 1.15.0, import from numpy.testing instead.",
- ImportWarning, stacklevel=2)
+ DeprecationWarning, stacklevel=2)
from ._private.utils import *
@@ -19,7 +20,7 @@ __all__ = [
'assert_array_equal', 'assert_array_less', 'assert_string_equal',
'assert_array_almost_equal', 'assert_raises', 'build_err_msg',
'decorate_methods', 'jiffies', 'memusage', 'print_assert_equal',
- 'raises', 'rand', 'rundocs', 'runstring', 'verbose', 'measure',
+ 'raises', 'rundocs', 'runstring', 'verbose', 'measure',
'assert_', 'assert_array_almost_equal_nulp', 'assert_raises_regex',
'assert_array_max_ulp', 'assert_warns', 'assert_no_warnings',
'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings',
diff --git a/numpy/tests/test_public_api.py b/numpy/tests/test_public_api.py
index 807c98652..e3621c0fd 100644
--- a/numpy/tests/test_public_api.py
+++ b/numpy/tests/test_public_api.py
@@ -1,14 +1,22 @@
from __future__ import division, absolute_import, print_function
import sys
+import subprocess
+import pkgutil
+import types
+import importlib
+import warnings
import numpy as np
+import numpy
import pytest
+
try:
import ctypes
except ImportError:
ctypes = None
+
def check_dir(module, module_name=None):
"""Returns a mapping of all objects with the wrong __module__ attribute."""
if module_name is None:
@@ -26,7 +34,8 @@ def check_dir(module, module_name=None):
sys.version_info[0] < 3,
reason="NumPy exposes slightly different functions on Python 2")
def test_numpy_namespace():
- # None of these objects are publicly documented.
+ # None of these objects are publicly documented to be part of the main
+ # NumPy namespace (some are useful though, others need to be cleaned up)
undocumented = {
'Tester': 'numpy.testing._private.nosetester.NoseTester',
'_add_newdoc_ufunc': 'numpy.core._multiarray_umath._add_newdoc_ufunc',
@@ -69,6 +78,28 @@ def test_numpy_namespace():
assert bad_results == whitelist
+@pytest.mark.parametrize('name', ['testing', 'Tester'])
+def test_import_lazy_import(name):
+ """Make sure we can actually use the modules we lazy load.
+
+ While not exported as part of the public API, it was accessible. With the
+    use of __getattr__ and __dir__, this isn't always true. It can happen that
+    an infinite recursion occurs.
+
+ This is the only way I found that would force the failure to appear on the
+ badly implemented code.
+
+ We also test for the presence of the lazily imported modules in dir
+
+ """
+ exe = (sys.executable, '-c', "import numpy; numpy." + name)
+ result = subprocess.check_output(exe)
+ assert not result
+
+ # Make sure they are still in the __dir__
+ assert name in dir(np)
+
+
def test_numpy_linalg():
bad_results = check_dir(np.linalg)
assert bad_results == {}
@@ -78,6 +109,7 @@ def test_numpy_fft():
bad_results = check_dir(np.fft)
assert bad_results == {}
+
@pytest.mark.skipif(ctypes is None,
reason="ctypes not available in this python")
def test_NPY_NO_EXPORT():
@@ -86,3 +118,381 @@ def test_NPY_NO_EXPORT():
f = getattr(cdll, 'test_not_exported', None)
assert f is None, ("'test_not_exported' is mistakenly exported, "
"NPY_NO_EXPORT does not work")
+
+
+# Historically NumPy has not used leading underscores for private submodules
+# much. This has resulted in lots of things that look like public modules
+# (i.e. things that can be imported as `import numpy.somesubmodule.somefile`),
+# but were never intended to be public. The PUBLIC_MODULES list contains
+# modules that are either public because they were meant to be, or because they
+# contain public functions/objects that aren't present in any other namespace
+# for whatever reason and therefore should be treated as public.
+#
+# The PRIVATE_BUT_PRESENT_MODULES list contains modules that look public (lack
+# of underscores) but should not be used. For many of those modules the
+# current status is fine. For others it may make sense to work on making them
+# private, to clean up our public API and avoid confusion.
+PUBLIC_MODULES = ['numpy.' + s for s in [
+ "ctypeslib",
+ "distutils",
+ "distutils.cpuinfo",
+ "distutils.exec_command",
+ "distutils.misc_util",
+ "distutils.log",
+ "distutils.system_info",
+ "doc",
+ "doc.basics",
+ "doc.broadcasting",
+ "doc.byteswapping",
+ "doc.constants",
+ "doc.creation",
+ "doc.dispatch",
+ "doc.glossary",
+ "doc.indexing",
+ "doc.internals",
+ "doc.misc",
+ "doc.structured_arrays",
+ "doc.subclassing",
+ "doc.ufuncs",
+ "dual",
+ "f2py",
+ "fft",
+ "lib",
+ "lib.format", # was this meant to be public?
+ "lib.mixins",
+ "lib.recfunctions",
+ "lib.scimath",
+ "linalg",
+ "ma",
+ "ma.extras",
+ "ma.mrecords",
+ "matlib",
+ "polynomial",
+ "polynomial.chebyshev",
+ "polynomial.hermite",
+ "polynomial.hermite_e",
+ "polynomial.laguerre",
+ "polynomial.legendre",
+ "polynomial.polynomial",
+ "polynomial.polyutils",
+ "random",
+ "testing",
+ "version",
+]]
+
+
+PUBLIC_ALIASED_MODULES = [
+ "numpy.char",
+ "numpy.emath",
+ "numpy.rec",
+]
+
+
+PRIVATE_BUT_PRESENT_MODULES = ['numpy.' + s for s in [
+ "compat",
+ "compat.py3k",
+ "conftest",
+ "core",
+ "core.arrayprint",
+ "core.defchararray",
+ "core.einsumfunc",
+ "core.fromnumeric",
+ "core.function_base",
+ "core.getlimits",
+ "core.machar",
+ "core.memmap",
+ "core.multiarray",
+ "core.numeric",
+ "core.numerictypes",
+ "core.overrides",
+ "core.records",
+ "core.shape_base",
+ "core.umath",
+ "core.umath_tests",
+ "distutils.ccompiler",
+ "distutils.command",
+ "distutils.command.autodist",
+ "distutils.command.bdist_rpm",
+ "distutils.command.build",
+ "distutils.command.build_clib",
+ "distutils.command.build_ext",
+ "distutils.command.build_py",
+ "distutils.command.build_scripts",
+ "distutils.command.build_src",
+ "distutils.command.config",
+ "distutils.command.config_compiler",
+ "distutils.command.develop",
+ "distutils.command.egg_info",
+ "distutils.command.install",
+ "distutils.command.install_clib",
+ "distutils.command.install_data",
+ "distutils.command.install_headers",
+ "distutils.command.sdist",
+ "distutils.compat",
+ "distutils.conv_template",
+ "distutils.core",
+ "distutils.extension",
+ "distutils.fcompiler",
+ "distutils.fcompiler.absoft",
+ "distutils.fcompiler.compaq",
+ "distutils.fcompiler.environment",
+ "distutils.fcompiler.g95",
+ "distutils.fcompiler.gnu",
+ "distutils.fcompiler.hpux",
+ "distutils.fcompiler.ibm",
+ "distutils.fcompiler.intel",
+ "distutils.fcompiler.lahey",
+ "distutils.fcompiler.mips",
+ "distutils.fcompiler.nag",
+ "distutils.fcompiler.none",
+ "distutils.fcompiler.pathf95",
+ "distutils.fcompiler.pg",
+ "distutils.fcompiler.sun",
+ "distutils.fcompiler.vast",
+ "distutils.from_template",
+ "distutils.intelccompiler",
+ "distutils.lib2def",
+ "distutils.line_endings",
+ "distutils.mingw32ccompiler",
+ "distutils.msvccompiler",
+ "distutils.npy_pkg_config",
+ "distutils.numpy_distribution",
+ "distutils.pathccompiler",
+ "distutils.unixccompiler",
+ "f2py.auxfuncs",
+ "f2py.capi_maps",
+ "f2py.cb_rules",
+ "f2py.cfuncs",
+ "f2py.common_rules",
+ "f2py.crackfortran",
+ "f2py.diagnose",
+ "f2py.f2py2e",
+ "f2py.f2py_testing",
+ "f2py.f90mod_rules",
+ "f2py.func2subr",
+ "f2py.rules",
+ "f2py.use_rules",
+ "fft.helper",
+ "lib.arraypad",
+ "lib.arraysetops",
+ "lib.arrayterator",
+ "lib.financial",
+ "lib.function_base",
+ "lib.histograms",
+ "lib.index_tricks",
+ "lib.nanfunctions",
+ "lib.npyio",
+ "lib.polynomial",
+ "lib.shape_base",
+ "lib.stride_tricks",
+ "lib.twodim_base",
+ "lib.type_check",
+ "lib.ufunclike",
+ "lib.user_array", # note: not in np.lib, but probably should just be deleted
+ "lib.utils",
+ "linalg.lapack_lite",
+ "linalg.linalg",
+ "ma.bench",
+ "ma.core",
+ "ma.testutils",
+ "ma.timer_comparison",
+ "matrixlib",
+ "matrixlib.defmatrix",
+ "random.bit_generator",
+ "random.bounded_integers",
+ "random.common",
+ "random.generator",
+ "random.mt19937",
+ "random.mtrand",
+ "random.pcg64",
+ "random.philox",
+ "random.sfc64",
+ "testing.print_coercion_tables",
+ "testing.utils",
+]]
+
+
+def is_unexpected(name):
+ """Check if this needs to be considered."""
+ if '._' in name or '.tests' in name or '.setup' in name:
+ return False
+
+ if name in PUBLIC_MODULES:
+ return False
+
+ if name in PUBLIC_ALIASED_MODULES:
+ return False
+
+ if name in PRIVATE_BUT_PRESENT_MODULES:
+ return False
+
+ return True
+
+
+# These are present in a directory with an __init__.py but cannot be imported
+# code_generators/ isn't installed, but present for an inplace build
+SKIP_LIST = [
+ "numpy.core.code_generators",
+ "numpy.core.code_generators.genapi",
+ "numpy.core.code_generators.generate_umath",
+ "numpy.core.code_generators.ufunc_docstrings",
+ "numpy.core.code_generators.generate_numpy_api",
+ "numpy.core.code_generators.generate_ufunc_api",
+ "numpy.core.code_generators.numpy_api",
+ "numpy.core.cversions",
+ "numpy.core.generate_numpy_api",
+ "numpy.distutils.msvc9compiler",
+]
+
+
+def test_all_modules_are_expected():
+ """
+ Test that we don't add anything that looks like a new public module by
+ accident. Check is based on filenames.
+ """
+
+ modnames = []
+ for _, modname, ispkg in pkgutil.walk_packages(path=np.__path__,
+ prefix=np.__name__ + '.',
+ onerror=None):
+ if is_unexpected(modname) and modname not in SKIP_LIST:
+ # We have a name that is new. If that's on purpose, add it to
+ # PUBLIC_MODULES. We don't expect to have to add anything to
+ # PRIVATE_BUT_PRESENT_MODULES. Use an underscore in the name!
+ modnames.append(modname)
+
+ if modnames:
+ raise AssertionError("Found unexpected modules: {}".format(modnames))
+
+
+# Stuff that clearly shouldn't be in the API and is detected by the next test
+# below
+SKIP_LIST_2 = [
+ 'numpy.math',
+ 'numpy.distutils.log.sys',
+ 'numpy.distutils.system_info.copy',
+ 'numpy.distutils.system_info.distutils',
+ 'numpy.distutils.system_info.log',
+ 'numpy.distutils.system_info.os',
+ 'numpy.distutils.system_info.platform',
+ 'numpy.distutils.system_info.re',
+ 'numpy.distutils.system_info.shutil',
+ 'numpy.distutils.system_info.subprocess',
+ 'numpy.distutils.system_info.sys',
+ 'numpy.distutils.system_info.tempfile',
+ 'numpy.distutils.system_info.textwrap',
+ 'numpy.distutils.system_info.warnings',
+ 'numpy.doc.constants.re',
+ 'numpy.doc.constants.textwrap',
+ 'numpy.lib.emath',
+ 'numpy.lib.math',
+ 'numpy.matlib.char',
+ 'numpy.matlib.rec',
+ 'numpy.matlib.emath',
+ 'numpy.matlib.math',
+ 'numpy.matlib.linalg',
+ 'numpy.matlib.fft',
+ 'numpy.matlib.random',
+ 'numpy.matlib.ctypeslib',
+ 'numpy.matlib.ma'
+]
+
+
+def test_all_modules_are_expected_2():
+ """
+ Method checking all objects. The pkgutil-based method in
+ `test_all_modules_are_expected` does not catch imports into a namespace,
+    only filenames. So this test is more thorough, and checks things like:
+
+ import .lib.scimath as emath
+
+ To check if something in a module is (effectively) public, one can check if
+ there's anything in that namespace that's a public function/object but is
+ not exposed in a higher-level namespace. For example for a `numpy.lib`
+ submodule::
+
+ mod = np.lib.mixins
+ for obj in mod.__all__:
+ if obj in np.__all__:
+ continue
+ elif obj in np.lib.__all__:
+ continue
+
+ else:
+ print(obj)
+
+ """
+
+ def find_unexpected_members(mod_name):
+ members = []
+ module = importlib.import_module(mod_name)
+ if hasattr(module, '__all__'):
+ objnames = module.__all__
+ else:
+ objnames = dir(module)
+
+ for objname in objnames:
+ if not objname.startswith('_'):
+ fullobjname = mod_name + '.' + objname
+ if isinstance(getattr(module, objname), types.ModuleType):
+ if is_unexpected(fullobjname):
+ if fullobjname not in SKIP_LIST_2:
+ members.append(fullobjname)
+
+ return members
+
+ unexpected_members = find_unexpected_members("numpy")
+ for modname in PUBLIC_MODULES:
+ unexpected_members.extend(find_unexpected_members(modname))
+
+ if unexpected_members:
+ raise AssertionError("Found unexpected object(s) that look like "
+ "modules: {}".format(unexpected_members))
+
+
+def test_api_importable():
+ """
+ Check that all submodules listed higher up in this file can be imported
+
+ Note that if a PRIVATE_BUT_PRESENT_MODULES entry goes missing, it may
+ simply need to be removed from the list (deprecation may or may not be
+ needed - apply common sense).
+ """
+ def check_importable(module_name):
+ try:
+ importlib.import_module(module_name)
+ except (ImportError, AttributeError):
+ return False
+
+ return True
+
+ module_names = []
+ for module_name in PUBLIC_MODULES:
+ if not check_importable(module_name):
+ module_names.append(module_name)
+
+ if module_names:
+ raise AssertionError("Modules in the public API that cannot be "
+ "imported: {}".format(module_names))
+
+ for module_name in PUBLIC_ALIASED_MODULES:
+ try:
+ eval(module_name)
+ except AttributeError:
+ module_names.append(module_name)
+
+ if module_names:
+ raise AssertionError("Modules in the public API that were not "
+ "found: {}".format(module_names))
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', category=DeprecationWarning)
+ warnings.filterwarnings('always', category=ImportWarning)
+ for module_name in PRIVATE_BUT_PRESENT_MODULES:
+ if not check_importable(module_name):
+ module_names.append(module_name)
+
+ if module_names:
+ raise AssertionError("Modules that are not really public but looked "
+ "public and can not be imported: "
+ "{}".format(module_names))
diff --git a/pavement.py b/pavement.py
index 75c862a0b..3637bc66d 100644
--- a/pavement.py
+++ b/pavement.py
@@ -12,8 +12,7 @@ Assumes you have git and the binaries/tarballs in installers/::
paver write_release
paver write_note
-This automatically put the checksum into README.rst, and write the Changelog
-which can be uploaded to sourceforge.
+This automatically puts the checksum into README.rst, and writes the Changelog.
TODO
====
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 000000000..918cbb278
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,71 @@
+[build-system]
+# Minimum requirements for the build system to execute.
+requires = [
+ "setuptools",
+ "wheel",
+ "Cython>=0.29.13", # Note: keep in sync with tools/cythonize.py
+]
+
+
+[tool.towncrier]
+ # Do not set this since it is hard to import numpy inside the source directory
+ # the name is hardcoded. Use "--version 1.18.0" to set the version
+ single_file = true
+ filename = "doc/source/release/{version}-notes.rst"
+ directory = "doc/release/upcoming_changes/"
+ issue_format = "`gh-{issue} <https://github.com/numpy/numpy/pull/{issue}>`__"
+ template = "doc/release/upcoming_changes/template.rst"
+ underlines = "~="
+ all_bullets = false
+
+
+ [[tool.towncrier.type]]
+ directory = "highlight"
+ name = "Highlights"
+ showcontent = true
+
+ [[tool.towncrier.type]]
+ directory = "new_function"
+ name = "New functions"
+ showcontent = true
+
+ [[tool.towncrier.type]]
+ directory = "deprecation"
+ name = "Deprecations"
+ showcontent = true
+
+ [[tool.towncrier.type]]
+ directory = "future"
+ name = "Future Changes"
+ showcontent = true
+
+ [[tool.towncrier.type]]
+ directory = "expired"
+ name = "Expired deprecations"
+ showcontent = true
+
+ [[tool.towncrier.type]]
+ directory = "compatibility"
+ name = "Compatibility notes"
+ showcontent = true
+
+ [[tool.towncrier.type]]
+ directory = "c_api"
+ name = "C API changes"
+ showcontent = true
+
+ [[tool.towncrier.type]]
+ directory = "new_feature"
+ name = "New Features"
+ showcontent = true
+
+ [[tool.towncrier.type]]
+ directory = "improvement"
+ name = "Improvements"
+ showcontent = true
+
+ [[tool.towncrier.type]]
+ directory = "change"
+ name = "Changes"
+ showcontent = true
+
diff --git a/runtests.py b/runtests.py
index 23245aeac..c469f85d8 100755
--- a/runtests.py
+++ b/runtests.py
@@ -18,6 +18,10 @@ Run a debugger:
$ gdb --args python runtests.py [...other args...]
+Disable pytest capturing of output by using its '-s' option:
+
+ $ python runtests.py -- -s
+
Generate C code coverage listing under build/lcov/:
(requires http://ltp.sourceforge.net/coverage/lcov.php)
@@ -67,6 +71,10 @@ def main(argv):
parser = ArgumentParser(usage=__doc__.lstrip())
parser.add_argument("--verbose", "-v", action="count", default=1,
help="more verbosity")
+ parser.add_argument("--debug-configure", action="store_true",
+ help=("add -v to build_src to show compiler "
+ "configuration output while creating "
+ "_numpyconfig.h and config.h"))
parser.add_argument("--no-build", "-n", action="store_true", default=False,
help="do not build the project (use system installed version)")
parser.add_argument("--build-only", "-b", action="store_true", default=False,
@@ -106,6 +114,8 @@ def main(argv):
help="Debug build")
parser.add_argument("--parallel", "-j", type=int, default=0,
help="Number of parallel jobs during build")
+ parser.add_argument("--warn-error", action="store_true",
+ help="Set -Werror to convert all compiler warnings to errors")
parser.add_argument("--show-build-log", action="store_true",
help="Show build output rather than using a log file")
parser.add_argument("--bench", action="store_true",
@@ -366,6 +376,10 @@ def build_project(args):
cmd += ["build"]
if args.parallel > 1:
cmd += ["-j", str(args.parallel)]
+ if args.debug_configure:
+ cmd += ["build_src", "--verbose"]
+ if args.warn_error:
+ cmd += ["--warn-error"]
# Install; avoid producing eggs so numpy can be imported from dst_dir.
cmd += ['install', '--prefix=' + dst_dir,
'--single-version-externally-managed',
diff --git a/setup.py b/setup.py
index a492142ed..068f0f405 100755
--- a/setup.py
+++ b/setup.py
@@ -44,6 +44,7 @@ Programming Language :: Python :: 3
Programming Language :: Python :: 3.5
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
+Programming Language :: Python :: 3 :: Only
Programming Language :: Python :: Implementation :: CPython
Topic :: Software Development
Topic :: Scientific/Engineering
@@ -82,6 +83,10 @@ def git_version():
except (subprocess.SubprocessError, OSError):
GIT_REVISION = "Unknown"
+ if not GIT_REVISION:
+ # this shouldn't happen but apparently can (see gh-8512)
+ GIT_REVISION = "Unknown"
+
return GIT_REVISION
# BEFORE importing setuptools, remove MANIFEST. Otherwise it may not be
@@ -262,7 +267,7 @@ def parse_setuppy_commands():
# below and not standalone. Hence they're not added to good_commands.
good_commands = ('develop', 'sdist', 'build', 'build_ext', 'build_py',
'build_clib', 'build_scripts', 'bdist_wheel', 'bdist_rpm',
- 'bdist_wininst', 'bdist_msi', 'bdist_mpkg')
+ 'bdist_wininst', 'bdist_msi', 'bdist_mpkg', 'build_src')
for command in good_commands:
if command in args:
@@ -364,7 +369,7 @@ def parse_setuppy_commands():
def setup_package():
- src_path = os.path.dirname(os.path.abspath(sys.argv[0]))
+ src_path = os.path.dirname(os.path.abspath(__file__))
old_path = os.getcwd()
os.chdir(src_path)
sys.path.insert(0, src_path)
@@ -402,7 +407,8 @@ def setup_package():
classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f],
platforms = ["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"],
test_suite='nose.collector',
- cmdclass={"sdist": sdist_checked},
+ cmdclass={"sdist": sdist_checked,
+ },
python_requires='>=3.5',
zip_safe=False,
entry_points={
@@ -421,8 +427,8 @@ def setup_package():
if run_build:
from numpy.distutils.core import setup
cwd = os.path.abspath(os.path.dirname(__file__))
- if not os.path.exists(os.path.join(cwd, 'PKG-INFO')):
- # Generate Cython sources, unless building from source release
+ if not 'sdist' in sys.argv:
+ # Generate Cython sources, unless we're generating an sdist
generate_cython()
metadata['configuration'] = configuration
diff --git a/shippable.yml b/shippable.yml
index 88e131f48..91323ceb6 100644
--- a/shippable.yml
+++ b/shippable.yml
@@ -31,9 +31,7 @@ build:
# we will pay the ~13 minute cost of compiling Cython only when a new
# version is scraped in by pip; otherwise, use the cached
# wheel shippable places on Amazon S3 after we build it once
- - pip install cython --cache-dir=/root/.cache/pip/wheels/$SHIPPABLE_PYTHON_VERSION
- # install pytz for datetime testing
- - pip install pytz
+ - pip install -r test_requirements.txt --cache-dir=/root/.cache/pip/wheels/$SHIPPABLE_PYTHON_VERSION
# install pytest-xdist to leverage a second core
# for unit tests
- pip install pytest-xdist
@@ -43,14 +41,14 @@ build:
# build first and adjust PATH so f2py is found in scripts dir
# use > 1 core for build sometimes slows down a fair bit,
# other times modestly speeds up, so avoid for now
- - python setup.py install
+ - pip install .
- extra_directories=($SHIPPABLE_REPO_DIR/build/*scripts*)
- extra_path=$(printf "%s:" "${extra_directories[@]}")
- export PATH="${extra_path}${PATH}"
# check OpenBLAS version
- - python tools/openblas_support.py --check_version 0.3.7.dev
+ - python tools/openblas_support.py --check_version 0.3.7
# run the test suite
- - python runtests.py -- -rsx --junit-xml=$SHIPPABLE_REPO_DIR/shippable/testresults/tests.xml -n 2 --durations=10
+ - python runtests.py --debug-configure --show-build-log -- -rsx --junit-xml=$SHIPPABLE_REPO_DIR/shippable/testresults/tests.xml -n 2 --durations=10
cache: true
cache_dir_list:
diff --git a/test_requirements.txt b/test_requirements.txt
new file mode 100644
index 000000000..2d52599b1
--- /dev/null
+++ b/test_requirements.txt
@@ -0,0 +1,7 @@
+cython==0.29.13
+pytest==5.1.3
+pytz==2019.2
+pytest-cov==2.7.1
+pickle5; python_version == '3.7'
+pickle5; python_version == '3.6' and platform_python_implementation != 'PyPy'
+nose
diff --git a/tools/ci/appveyor/requirements.txt b/tools/ci/appveyor/requirements.txt
deleted file mode 100644
index fba8260da..000000000
--- a/tools/ci/appveyor/requirements.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-cython
-nose
-pytest-timeout
-pytest-xdist
-pytest-env
-pytest-faulthandler
diff --git a/tools/ci/test_all_newsfragments_used.py b/tools/ci/test_all_newsfragments_used.py
new file mode 100755
index 000000000..6c4591fd8
--- /dev/null
+++ b/tools/ci/test_all_newsfragments_used.py
@@ -0,0 +1,16 @@
+#!/usr/bin/env python
+
+import sys
+import toml
+import os
+
+path = toml.load("pyproject.toml")["tool"]["towncrier"]["directory"]
+
+fragments = os.listdir(path)
+fragments.remove("README.rst")
+fragments.remove("template.rst")
+
+if fragments:
+ print("The following files were not found by towncrier:")
+ print(" " + " \n".join(fragments))
+ sys.exit(1)
diff --git a/tools/cythonize.py b/tools/cythonize.py
index c81b72d25..5bea2d4ec 100755
--- a/tools/cythonize.py
+++ b/tools/cythonize.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
""" cythonize
Cythonize pyx files into C files as needed.
@@ -54,30 +54,25 @@ except NameError:
def process_pyx(fromfile, tofile):
flags = ['-3', '--fast-fail']
if tofile.endswith('.cxx'):
- flags += ['--cplus']
+ flags.append('--cplus')
try:
# try the cython in the installed python first (somewhat related to scipy/scipy#2397)
from Cython.Compiler.Version import version as cython_version
except ImportError:
- # if that fails, use the one on the path, which might be the wrong version
- try:
- # Try the one on the path as a last resort
- subprocess.check_call(
- ['cython'] + flags + ["-o", tofile, fromfile])
- except OSError:
- raise OSError('Cython needs to be installed')
+ # The `cython` command need not point to the version installed in the
+ # Python running this script, so raise an error to avoid the chance of
+ # using the wrong version of Cython.
+ raise OSError('Cython needs to be installed in Python as a module')
else:
# check the version, and invoke through python
from distutils.version import LooseVersion
- # requiring the newest version on all pythons doesn't work, since
- # we're relying on the version of the distribution cython. Add new
- # versions as they become required for new python versions.
- if sys.version_info[:2] < (3, 7):
- required_version = LooseVersion('0.19')
- else:
- required_version = LooseVersion('0.28')
+ # Cython 0.29.13 is required for Python 3.8 and there are
+ # other fixes in the 0.29 series that are needed even for earlier
+ # Python versions.
+ # Note: keep in sync with that in pyproject.toml
+ required_version = LooseVersion('0.29.13')
if LooseVersion(cython_version) < required_version:
raise RuntimeError('Building {} requires Cython >= {}'.format(
diff --git a/tools/npy_tempita/compat3.py b/tools/npy_tempita/compat3.py
index eb890ca14..01d771345 100644
--- a/tools/npy_tempita/compat3.py
+++ b/tools/npy_tempita/compat3.py
@@ -5,7 +5,7 @@ import sys
__all__ = ['PY3', 'b', 'basestring_', 'bytes', 'next', 'is_unicode',
'iteritems']
-PY3 = True if sys.version_info[0] == 3 else False
+PY3 = True if sys.version_info[0] >= 3 else False
if sys.version_info[0] < 3:
diff --git a/tools/openblas_support.py b/tools/openblas_support.py
index ac4abb3f0..964adce6e 100644
--- a/tools/openblas_support.py
+++ b/tools/openblas_support.py
@@ -14,8 +14,8 @@ from tempfile import mkstemp, gettempdir
import zipfile
import tarfile
-OPENBLAS_V = '6a8b426'
-OPENBLAS_LONG = 'v0.3.5-274-g6a8b4269'
+OPENBLAS_V = 'v0.3.7'
+OPENBLAS_LONG = 'v0.3.7'
BASE_LOC = ''
RACKSPACE = 'https://3f23b170c54c2533c070-1c8a9b3114517dc5fe17b7c3f8c63a43.ssl.cf2.rackcdn.com'
ARCHITECTURES = ['', 'windows', 'darwin', 'arm', 'x86', 'ppc64']
@@ -47,7 +47,7 @@ def download_openblas(target, arch):
# https://github.com/tylerjereddy/openblas-static-gcc/tree/master/ARMv8
# build done on GCC compile farm machine named gcc115
# tarball uploaded manually to an unshared Dropbox location
- filename = ('https://www.dropbox.com/s/zsp1wb3tq4n9g0b/'
+ filename = ('https://www.dropbox.com/s/vdeckao4omss187/'
'openblas-{}-armv8.tar.gz?dl=1'.format(OPENBLAS_V))
typ = 'tar.gz'
elif arch == 'ppc64':
@@ -55,7 +55,7 @@ def download_openblas(target, arch):
# https://github.com/tylerjereddy/openblas-static-gcc/blob/master/power8
# built on GCC compile farm machine named gcc112
# manually uploaded tarball to an unshared Dropbox location
- filename = ('https://www.dropbox.com/s/k9uabwoi8bekjwe/'
+ filename = ('https://www.dropbox.com/s/yt0d2j86x1j8nh1/'
'openblas-{}-ppc64le-power8.tar.gz?dl=1'.format(OPENBLAS_V))
typ = 'tar.gz'
elif arch == 'darwin':
diff --git a/tools/pypy-test.sh b/tools/pypy-test.sh
index 038748af9..b02d18778 100755
--- a/tools/pypy-test.sh
+++ b/tools/pypy-test.sh
@@ -32,14 +32,14 @@ mkdir -p pypy3
(cd pypy3; tar --strip-components=1 -xf ../pypy.tar.bz2)
pypy3/bin/pypy3 -mensurepip
pypy3/bin/pypy3 -m pip install --upgrade pip setuptools
-pypy3/bin/pypy3 -m pip install --user cython==0.29.0 pytest pytz --no-warn-script-location
+pypy3/bin/pypy3 -m pip install --user -r test_requirements.txt --no-warn-script-location
echo
echo pypy3 version
pypy3/bin/pypy3 -c "import sys; print(sys.version)"
echo
-pypy3/bin/pypy3 runtests.py --show-build-log -- -rsx \
+pypy3/bin/pypy3 runtests.py --debug-configure --show-build-log -v -- -rsx \
--junitxml=junit/test-results.xml --durations 10
echo Make sure the correct openblas has been linked in
diff --git a/tools/swig/test/testFarray.py b/tools/swig/test/testFarray.py
index 0037dc9b3..e8bf711c5 100755
--- a/tools/swig/test/testFarray.py
+++ b/tools/swig/test/testFarray.py
@@ -15,7 +15,7 @@ else: BadListError = ValueError
# Add the distutils-generated build directory to the python search path and then
# import the extension module
-libDir = "lib.%s-%s" % (get_platform(), sys.version[:3])
+libDir = "lib.{}-{}.{}".format(get_platform(), *sys.version_info[:2])
sys.path.insert(0, os.path.join("build", libDir))
import Farray
diff --git a/tools/travis-before-install.sh b/tools/travis-before-install.sh
index b1c1f2ca1..072ad3bf6 100755
--- a/tools/travis-before-install.sh
+++ b/tools/travis-before-install.sh
@@ -30,12 +30,8 @@ fi
source venv/bin/activate
python -V
-if [ -n "$INSTALL_PICKLE5" ]; then
- pip install pickle5
-fi
-
+popd
pip install --upgrade pip setuptools
-pip install nose pytz cython pytest
+pip install -r test_requirements.txt
if [ -n "$USE_ASV" ]; then pip install asv; fi
-popd
diff --git a/tools/travis-test.sh b/tools/travis-test.sh
index d056ac69c..472f5987d 100755
--- a/tools/travis-test.sh
+++ b/tools/travis-test.sh
@@ -32,7 +32,7 @@ werrors="$werrors -Werror=implicit-function-declaration"
setup_base()
{
- # use default python flags but remoge sign-compare
+ # use default python flags but remove sign-compare
sysflags="$($PYTHON -c "from distutils import sysconfig; \
print (sysconfig.get_config_var('CFLAGS'))")"
export CFLAGS="$sysflags $werrors -Wlogical-op -Wno-sign-compare"
@@ -52,7 +52,7 @@ setup_base()
else
# Python3.5-dbg on travis seems to need this
export CFLAGS=$CFLAGS" -Wno-maybe-uninitialized"
- $PYTHON setup.py build_ext --inplace 2>&1 | tee log
+ $PYTHON setup.py build build_src -v build_ext --inplace 2>&1 | tee log
fi
grep -v "_configtest" log \
| grep -vE "ld returned 1|no previously-included files matching|manifest_maker: standard file '-c'" \
@@ -65,12 +65,12 @@ setup_base()
run_test()
{
+ $PIP install -r test_requirements.txt
if [ -n "$USE_DEBUG" ]; then
export PYTHONPATH=$PWD
fi
if [ -n "$RUN_COVERAGE" ]; then
- $PIP install pytest-cov
COVERAGE_FLAG=--coverage
fi
@@ -90,7 +90,9 @@ run_test()
export PYTHONWARNINGS="ignore::DeprecationWarning:virtualenv"
$PYTHON ../runtests.py -n -v --durations 10 --mode=full $COVERAGE_FLAG
else
- $PYTHON ../runtests.py -n -v --durations 10
+ # disable --durations temporarily, pytest currently aborts
+ # when that is used with python3.6-dbg
+ $PYTHON ../runtests.py -n -v # --durations 10
fi
if [ -n "$RUN_COVERAGE" ]; then
@@ -149,21 +151,17 @@ if [ -n "$USE_WHEEL" ] && [ $# -eq 0 ]; then
export F90='gfortran --coverage'
export LDFLAGS='--coverage'
fi
- $PYTHON setup.py bdist_wheel
+ $PYTHON setup.py build build_src -v bdist_wheel
# Make another virtualenv to install into
virtualenv --python=`which $PYTHON` venv-for-wheel
. venv-for-wheel/bin/activate
# Move out of source directory to avoid finding local numpy
pushd dist
$PIP install --pre --no-index --upgrade --find-links=. numpy
- $PIP install nose pytest
-
- if [ -n "$INSTALL_PICKLE5" ]; then
- $PIP install pickle5
- fi
-
popd
+
run_test
+
elif [ -n "$USE_SDIST" ] && [ $# -eq 0 ]; then
# use an up-to-date pip / setuptools inside the venv
$PIP install -U virtualenv
@@ -180,11 +178,6 @@ elif [ -n "$USE_SDIST" ] && [ $# -eq 0 ]; then
# Move out of source directory to avoid finding local numpy
pushd dist
$PIP install numpy*
- $PIP install nose pytest
- if [ -n "$INSTALL_PICKLE5" ]; then
- $PIP install pickle5
- fi
-
popd
run_test
else
diff --git a/tox.ini b/tox.ini
index 3223b9e1b..a38a03c97 100644
--- a/tox.ini
+++ b/tox.ini
@@ -30,8 +30,7 @@ envlist =
py37-not-relaxed-strides
[testenv]
-deps=
- pytest
+deps= -Ur{toxinidir}/test_requirements.txt
changedir={envdir}
commands={envpython} {toxinidir}/runtests.py --mode=full {posargs:}